Posted to commits@ignite.apache.org by vo...@apache.org on 2016/09/19 10:26:51 UTC

[01/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Repository: ignite
Updated Branches:
  refs/heads/ignite-1.6.8-hadoop 2fe0272ba -> 857cdcde6


http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopPlannerMockJob.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopPlannerMockJob.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopPlannerMockJob.java
deleted file mode 100644
index 88d0f80..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopPlannerMockJob.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteLogger;
-import org.jetbrains.annotations.Nullable;
-
-import java.util.Collection;
-import java.util.UUID;
-
-/**
- * Mock job for planner tests.
- */
-public class HadoopPlannerMockJob implements HadoopJob {
-    /** Input splits. */
-    private final Collection<HadoopInputSplit> splits;
-
-    /** Reducers count. */
-    private final int reducers;
-
-    /**
-     * Constructor.
-     *
-     * @param splits Input splits.
-     * @param reducers Reducers.
-     */
-    public HadoopPlannerMockJob(Collection<HadoopInputSplit> splits, int reducers) {
-        this.splits = splits;
-        this.reducers = reducers;
-    }
-
-    /** {@inheritDoc} */
-    @Override public Collection<HadoopInputSplit> input() throws IgniteCheckedException {
-        return splits;
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopJobInfo info() {
-        return new JobInfo(reducers);
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopJobId id() {
-        throwUnsupported();
-
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopTaskContext getTaskContext(HadoopTaskInfo info) throws IgniteCheckedException {
-        throwUnsupported();
-
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void initialize(boolean external, UUID nodeId) throws IgniteCheckedException {
-        throwUnsupported();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void dispose(boolean external) throws IgniteCheckedException {
-        throwUnsupported();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void prepareTaskEnvironment(HadoopTaskInfo info) throws IgniteCheckedException {
-        throwUnsupported();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void cleanupTaskEnvironment(HadoopTaskInfo info) throws IgniteCheckedException {
-        throwUnsupported();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void cleanupStagingDirectory() {
-        throwUnsupported();
-    }
-
-    /**
-     * Throw {@link UnsupportedOperationException}.
-     */
-    private static void throwUnsupported() {
-        throw new UnsupportedOperationException("Should not be called!");
-    }
-
-    /**
-     * Mocked job info.
-     */
-    private static class JobInfo implements HadoopJobInfo {
-        /** Reducers. */
-        private final int reducers;
-
-        /**
-         * Constructor.
-         *
-         * @param reducers Reducers.
-         */
-        public JobInfo(int reducers) {
-            this.reducers = reducers;
-        }
-
-        /** {@inheritDoc} */
-        @Override public int reducers() {
-            return reducers;
-        }
-
-        /** {@inheritDoc} */
-        @Nullable @Override public String property(String name) {
-            throwUnsupported();
-
-            return null;
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean hasCombiner() {
-            throwUnsupported();
-
-            return false;
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean hasReducer() {
-            throwUnsupported();
-
-            return false;
-        }
-
-        /** {@inheritDoc} */
-        @Override public HadoopJob createJob(Class<? extends HadoopJob> jobCls, HadoopJobId jobId, IgniteLogger log,
-            @Nullable String[] libNames) throws IgniteCheckedException {
-            throwUnsupported();
-
-            return null;
-        }
-
-        /** {@inheritDoc} */
-        @Override public String jobName() {
-            throwUnsupported();
-
-            return null;
-        }
-
-        /** {@inheritDoc} */
-        @Override public String user() {
-            throwUnsupported();
-
-            return null;
-        }
-    }
-}
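
A minimal sketch of how this mock is exercised; HadoopFileBlock as the concrete
HadoopInputSplit and the test method itself are assumptions, not part of this diff:

    /** Hypothetical test body: only input() and info().reducers() are usable on the mock. */
    public void testPlannerMockJob() throws IgniteCheckedException {
        // Assumes: java.net.URI, java.util.ArrayList, java.util.Collection.
        Collection<HadoopInputSplit> splits = new ArrayList<>();

        // HadoopFileBlock(hosts, file, start, len) is assumed as the split implementation.
        splits.add(new HadoopFileBlock(new String[] {"host1"}, URI.create("igfs://igfs@/f1"), 0, 100));
        splits.add(new HadoopFileBlock(new String[] {"host2"}, URI.create("igfs://igfs@/f2"), 0, 100));

        HadoopJob job = new HadoopPlannerMockJob(splits, 4);

        assert job.input().size() == 2;    // Splits pass through unchanged.
        assert job.info().reducers() == 4; // Reducer count from the constructor.

        // Any other method (id(), initialize(), dispose(), ...) throws UnsupportedOperationException.
    }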


[11/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopExternalProcessStarter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopExternalProcessStarter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopExternalProcessStarter.java
deleted file mode 100644
index 32880e4..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopExternalProcessStarter.java
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.child;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.io.PrintStream;
-import java.net.URL;
-import java.util.UUID;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteLogger;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication.HadoopExternalCommunication;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lang.IgniteClosure;
-import org.apache.ignite.logger.log4j.Log4JLogger;
-import org.apache.ignite.marshaller.jdk.JdkMarshaller;
-
-/**
- * Hadoop external process base class.
- */
-public class HadoopExternalProcessStarter {
-    /** Path to Log4j configuration file. */
-    public static final String DFLT_LOG4J_CONFIG = "config/ignite-log4j.xml";
-
-    /** Arguments. */
-    private Args args;
-
-    /** System out. */
-    private OutputStream out;
-
-    /** System err. */
-    private OutputStream err;
-
-    /**
-     * @param args Parsed arguments.
-     */
-    public HadoopExternalProcessStarter(Args args) {
-        this.args = args;
-    }
-
-    /**
-     * @param cmdArgs Process arguments.
-     */
-    public static void main(String[] cmdArgs) {
-        try {
-            Args args = arguments(cmdArgs);
-
-            new HadoopExternalProcessStarter(args).run();
-        }
-        catch (Exception e) {
-            System.err.println("Failed");
-
-            System.err.println(e.getMessage());
-
-            e.printStackTrace(System.err);
-        }
-    }
-
-    /**
-     * Runs the starter: sets up the work directory, output streams and communication with the parent node.
-     *
-     * @throws Exception If failed.
-     */
-    public void run() throws Exception {
-        U.setWorkDirectory(args.workDir, U.getIgniteHome());
-
-        File outputDir = outputDirectory();
-
-        initializeStreams(outputDir);
-
-        ExecutorService msgExecSvc = Executors.newFixedThreadPool(
-            Integer.getInteger("MSG_THREAD_POOL_SIZE", Runtime.getRuntime().availableProcessors() * 2));
-
-        IgniteLogger log = logger(outputDir);
-
-        HadoopExternalCommunication comm = new HadoopExternalCommunication(
-            args.nodeId,
-            args.childProcId,
-            new JdkMarshaller(),
-            log,
-            msgExecSvc,
-            "external"
-        );
-
-        comm.start();
-
-        HadoopProcessDescriptor nodeDesc = new HadoopProcessDescriptor(args.nodeId, args.parentProcId);
-        nodeDesc.address(args.addr);
-        nodeDesc.tcpPort(args.tcpPort);
-        nodeDesc.sharedMemoryPort(args.shmemPort);
-
-        HadoopChildProcessRunner runner = new HadoopChildProcessRunner();
-
-        runner.start(comm, nodeDesc, msgExecSvc, log);
-
-        System.err.println("Started");
-        System.err.flush();
-
-        System.setOut(new PrintStream(out));
-        System.setErr(new PrintStream(err));
-    }
-
-    /**
-     * @param outputDir Directory for process output.
-     * @throws Exception If failed.
-     */
-    private void initializeStreams(File outputDir) throws Exception {
-        out = new FileOutputStream(new File(outputDir, args.childProcId + ".out"));
-        err = new FileOutputStream(new File(outputDir, args.childProcId + ".err"));
-    }
-
-    /**
-     * @return Path to output directory.
-     * @throws IOException If failed.
-     */
-    private File outputDirectory() throws IOException {
-        File f = new File(args.out);
-
-        if (!f.exists()) {
-            if (!f.mkdirs())
-                throw new IOException("Failed to create output directory: " + args.out);
-        }
-        else {
-            if (f.isFile())
-                throw new IOException("Output directory is a file: " + args.out);
-        }
-
-        return f;
-    }
-
-    /**
-     * @param outputDir Directory for process output.
-     * @return Logger.
-     */
-    private IgniteLogger logger(final File outputDir) {
-        final URL url = U.resolveIgniteUrl(DFLT_LOG4J_CONFIG);
-
-        Log4JLogger logger;
-
-        try {
-            logger = url != null ? new Log4JLogger(url) : new Log4JLogger(true);
-        }
-        catch (IgniteCheckedException e) {
-            System.err.println("Failed to create URL-based logger. Will use default one.");
-
-            e.printStackTrace();
-
-            logger = new Log4JLogger(true);
-        }
-
-        logger.updateFilePath(new IgniteClosure<String, String>() {
-            @Override public String apply(String s) {
-                return new File(outputDir, args.childProcId + ".log").getAbsolutePath();
-            }
-        });
-
-        return logger;
-    }
-
-    /**
-     * @param processArgs Process arguments.
-     * @return Parsed arguments.
-     * @throws Exception If a required parameter value is missing.
-     */
-    private static Args arguments(String[] processArgs) throws Exception {
-        Args args = new Args();
-
-        for (int i = 0; i < processArgs.length; i++) {
-            String arg = processArgs[i];
-
-            switch (arg) {
-                case "-cpid": {
-                    if (i == processArgs.length - 1)
-                        throw new Exception("Missing process ID for '-cpid' parameter");
-
-                    String procIdStr = processArgs[++i];
-
-                    args.childProcId = UUID.fromString(procIdStr);
-
-                    break;
-                }
-
-                case "-ppid": {
-                    if (i == processArgs.length - 1)
-                        throw new Exception("Missing process ID for '-ppid' parameter");
-
-                    String procIdStr = processArgs[++i];
-
-                    args.parentProcId = UUID.fromString(procIdStr);
-
-                    break;
-                }
-
-                case "-nid": {
-                    if (i == processArgs.length - 1)
-                        throw new Exception("Missing node ID for '-nid' parameter");
-
-                    String nodeIdStr = processArgs[++i];
-
-                    args.nodeId = UUID.fromString(nodeIdStr);
-
-                    break;
-                }
-
-                case "-addr": {
-                    if (i == processArgs.length - 1)
-                        throw new Exception("Missing node address for '-addr' parameter");
-
-                    args.addr = processArgs[++i];
-
-                    break;
-                }
-
-                case "-tport": {
-                    if (i == processArgs.length - 1)
-                        throw new Exception("Missing tcp port for '-tport' parameter");
-
-                    args.tcpPort = Integer.parseInt(processArgs[++i]);
-
-                    break;
-                }
-
-                case "-sport": {
-                    if (i == processArgs.length - 1)
-                        throw new Exception("Missing shared memory port for '-sport' parameter");
-
-                    args.shmemPort = Integer.parseInt(processArgs[++i]);
-
-                    break;
-                }
-
-                case "-out": {
-                    if (i == processArgs.length - 1)
-                        throw new Exception("Missing output folder name for '-out' parameter");
-
-                    args.out = processArgs[++i];
-
-                    break;
-                }
-
-                case "-wd": {
-                    if (i == processArgs.length - 1)
-                        throw new Exception("Missing work folder name for '-wd' parameter");
-
-                    args.workDir = processArgs[++i];
-
-                    break;
-                }
-            }
-        }
-
-        return args;
-    }
-
-    /**
-     * Execution arguments.
-     */
-    private static class Args {
-        /** Child process ID. */
-        private UUID childProcId;
-
-        /** Parent process ID. */
-        private UUID parentProcId;
-
-        /** Parent node ID. */
-        private UUID nodeId;
-
-        /** Node address. */
-        private String addr;
-
-        /** TCP port. */
-        private int tcpPort;
-
-        /** Shmem port. */
-        private int shmemPort = -1;
-
-        /** Output folder. */
-        private String out;
-
-        /** Work directory. */
-        private String workDir;
-    }
-}
\ No newline at end of file
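
For reference, a sketch of the command-line contract the parser above implements;
all IDs, ports and paths below are placeholder values, not part of this diff:

    // main() prints "Failed" and the stack trace to stderr if any flag value
    // is missing or malformed; every value flag must be followed by its value.
    HadoopExternalProcessStarter.main(new String[] {
        "-cpid", "00000000-0000-0000-0000-000000000001", // Child process ID (UUID).
        "-ppid", "00000000-0000-0000-0000-000000000002", // Parent process ID (UUID).
        "-nid",  "00000000-0000-0000-0000-000000000003", // Parent node ID (UUID).
        "-addr", "127.0.0.1",                            // Parent node address.
        "-tport", "27100",                               // Parent TCP port.
        "-sport", "-1",                                  // Shared memory port (-1 disables shmem).
        "-out",  "/tmp/hadoop-child",                    // Directory for .out/.err/.log files.
        "-wd",   "/tmp/hadoop-child/work"                // Work directory.
    });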

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopAbstractCommunicationClient.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopAbstractCommunicationClient.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopAbstractCommunicationClient.java
deleted file mode 100644
index ddf6a20..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopAbstractCommunicationClient.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication;
-
-import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-/**
- * Implements basic lifecycle for communication clients.
- */
-public abstract class HadoopAbstractCommunicationClient implements HadoopCommunicationClient {
-    /** Time when this client was last used. */
-    private volatile long lastUsed = U.currentTimeMillis();
-
-    /** Reservations. */
-    private final AtomicInteger reserves = new AtomicInteger();
-
-    /** {@inheritDoc} */
-    @Override public boolean close() {
-        return reserves.compareAndSet(0, -1);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void forceClose() {
-        reserves.set(-1);
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean closed() {
-        return reserves.get() == -1;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean reserve() {
-        while (true) {
-            int r = reserves.get();
-
-            if (r == -1)
-                return false;
-
-            if (reserves.compareAndSet(r, r + 1))
-                return true;
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void release() {
-        while (true) {
-            int r = reserves.get();
-
-            if (r == -1)
-                return;
-
-            if (reserves.compareAndSet(r, r - 1))
-                return;
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean reserved() {
-        return reserves.get() > 0;
-    }
-
-    /** {@inheritDoc} */
-    @Override public long getIdleTime() {
-        return U.currentTimeMillis() - lastUsed;
-    }
-
-    /**
-     * Updates used time.
-     */
-    protected void markUsed() {
-        lastUsed = U.currentTimeMillis();
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopAbstractCommunicationClient.class, this);
-    }
-}
\ No newline at end of file
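
The reservation counter above yields the following lifecycle: close() only succeeds
while no reservations are held, and a closed client can never be reserved again.
A sketch with a trivial subclass (the sendMessage body is a placeholder):

    HadoopAbstractCommunicationClient client = new HadoopAbstractCommunicationClient() {
        @Override public void sendMessage(HadoopProcessDescriptor desc, HadoopMessage msg) {
            markUsed(); // Resets the idle timer, so getIdleTime() starts from zero again.
        }
    };

    assert client.reserve();  // reserves: 0 -> 1.
    assert !client.close();   // Fails: a reservation is still held.

    client.release();         // reserves: 1 -> 0.

    assert client.close();    // Succeeds: CAS 0 -> -1.
    assert !client.reserve(); // Closed for good: reserve() sees -1 and gives up.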

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopCommunicationClient.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopCommunicationClient.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopCommunicationClient.java
deleted file mode 100644
index a325a3d..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopCommunicationClient.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication;
-
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;
-
-/**
- * Communication client for messaging with an external Hadoop process.
- */
-public interface HadoopCommunicationClient {
-    /**
-     * @return {@code True} if client has been closed by this call,
-     *      {@code false} if failed to close client (due to concurrent reservation or concurrent close).
-     */
-    public boolean close();
-
-    /**
-     * Forces client close.
-     */
-    public void forceClose();
-
-    /**
-     * @return {@code True} if client is closed.
-     */
-    public boolean closed();
-
-    /**
-     * @return {@code True} if client was reserved, {@code false} otherwise.
-     */
-    public boolean reserve();
-
-    /**
-     * Releases this client by decreasing reservations.
-     */
-    public void release();
-
-    /**
-     * @return {@code True} if client was reserved.
-     */
-    public boolean reserved();
-
-    /**
-     * Gets idle time of this client.
-     *
-     * @return Idle time of this client.
-     */
-    public long getIdleTime();
-
-    /**
-     * @param desc Process descriptor.
-     * @param msg Message to send.
-     * @throws IgniteCheckedException If failed.
-     */
-    public void sendMessage(HadoopProcessDescriptor desc, HadoopMessage msg) throws IgniteCheckedException;
-}
\ No newline at end of file
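
Callers are expected to pair reserve() and release() around sendMessage(), force-closing
the client if the send fails; a sketch of that usage pattern (client, desc and msg are
assumed to exist):

    boolean closeOnRelease = true;

    try {
        if (!client.reserve())
            throw new IgniteCheckedException("Client is already closed: " + client);

        client.sendMessage(desc, msg);

        closeOnRelease = false;
    }
    finally {
        if (closeOnRelease)
            client.forceClose(); // Send failed: drop the client entirely.
        else
            client.release();    // Normal path: undo the reservation only.
    }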

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopExternalCommunication.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopExternalCommunication.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopExternalCommunication.java
deleted file mode 100644
index 1d59a95..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopExternalCommunication.java
+++ /dev/null
@@ -1,1460 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication;
-
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.net.ConnectException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.SocketTimeoutException;
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.nio.channels.SocketChannel;
-import java.util.Collection;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteLogger;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;
-import org.apache.ignite.internal.util.GridConcurrentFactory;
-import org.apache.ignite.internal.util.GridKeyLock;
-import org.apache.ignite.internal.util.ipc.IpcEndpoint;
-import org.apache.ignite.internal.util.ipc.shmem.IpcOutOfSystemResourcesException;
-import org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryClientEndpoint;
-import org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint;
-import org.apache.ignite.internal.util.nio.GridBufferedParser;
-import org.apache.ignite.internal.util.nio.GridNioAsyncNotifyFilter;
-import org.apache.ignite.internal.util.nio.GridNioCodecFilter;
-import org.apache.ignite.internal.util.nio.GridNioFilter;
-import org.apache.ignite.internal.util.nio.GridNioFilterAdapter;
-import org.apache.ignite.internal.util.nio.GridNioFuture;
-import org.apache.ignite.internal.util.nio.GridNioMessageTracker;
-import org.apache.ignite.internal.util.nio.GridNioServer;
-import org.apache.ignite.internal.util.nio.GridNioServerListener;
-import org.apache.ignite.internal.util.nio.GridNioServerListenerAdapter;
-import org.apache.ignite.internal.util.nio.GridNioSession;
-import org.apache.ignite.internal.util.nio.GridNioSessionMetaKey;
-import org.apache.ignite.internal.util.typedef.CI1;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.X;
-import org.apache.ignite.internal.util.typedef.internal.LT;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.internal.util.worker.GridWorker;
-import org.apache.ignite.marshaller.Marshaller;
-import org.apache.ignite.thread.IgniteThread;
-import org.jetbrains.annotations.Nullable;
-import org.jsr166.ConcurrentLinkedDeque8;
-
-/**
- * Hadoop external communication class.
- */
-public class HadoopExternalCommunication {
-    /** IPC error message. */
-    public static final String OUT_OF_RESOURCES_TCP_MSG = "Failed to allocate shared memory segment " +
-        "(switching to TCP, may be slower).";
-
-    /** Default port which node sets listener to (value is <tt>27100</tt>). */
-    public static final int DFLT_PORT = 27100;
-
-    /** Default connection timeout (value is <tt>1000</tt>ms). */
-    public static final long DFLT_CONN_TIMEOUT = 1000;
-
-    /** Default maximum connection timeout (value is <tt>600,000</tt>ms). */
-    public static final long DFLT_MAX_CONN_TIMEOUT = 10 * 60 * 1000;
-
-    /** Default reconnect attempts count (value is <tt>10</tt>). */
-    public static final int DFLT_RECONNECT_CNT = 10;
-
-    /** Default message queue limit per connection (for incoming and outgoing messages). */
-    public static final int DFLT_MSG_QUEUE_LIMIT = GridNioServer.DFLT_SEND_QUEUE_LIMIT;
-
-    /** Default count of selectors for TCP server (value is <tt>1</tt>). */
-    public static final int DFLT_SELECTORS_CNT = 1;
-
-    /** Process descriptor meta for session. */
-    private static final int PROCESS_META = GridNioSessionMetaKey.nextUniqueKey();
-
-    /** Handshake finish meta for session. */
-    private static final int HANDSHAKE_FINISH_META = GridNioSessionMetaKey.nextUniqueKey();
-
-    /** Message tracker meta for session. */
-    private static final int TRACKER_META = GridNioSessionMetaKey.nextUniqueKey();
-
-    /**
-     * Default local port range (value is <tt>100</tt>).
-     * See {@link #setLocalPortRange(int)} for details.
-     */
-    public static final int DFLT_PORT_RANGE = 100;
-
-    /** Default value for {@code TCP_NODELAY} socket option (value is <tt>true</tt>). */
-    public static final boolean DFLT_TCP_NODELAY = true;
-
-    /** Server listener. */
-    private final GridNioServerListener<HadoopMessage> srvLsnr =
-        new GridNioServerListenerAdapter<HadoopMessage>() {
-            @Override public void onConnected(GridNioSession ses) {
-                HadoopProcessDescriptor desc = ses.meta(PROCESS_META);
-
-                assert desc != null : "Received connected notification without finished handshake: " + ses;
-            }
-
-            /** {@inheritDoc} */
-            @Override public void onDisconnected(GridNioSession ses, @Nullable Exception e) {
-                if (log.isDebugEnabled())
-                    log.debug("Closed connection for session: " + ses);
-
-                if (e != null)
-                    U.error(log, "Session disconnected due to exception: " + ses, e);
-
-                HadoopProcessDescriptor desc = ses.meta(PROCESS_META);
-
-                if (desc != null) {
-                    HadoopCommunicationClient rmv = clients.remove(desc.processId());
-
-                    if (rmv != null)
-                        rmv.forceClose();
-                }
-
-                HadoopMessageListener lsnr0 = lsnr;
-
-                if (lsnr0 != null)
-                    // Notify listener about connection close.
-                    lsnr0.onConnectionLost(desc);
-            }
-
-            /** {@inheritDoc} */
-            @Override public void onMessage(GridNioSession ses, HadoopMessage msg) {
-                notifyListener(ses.<HadoopProcessDescriptor>meta(PROCESS_META), msg);
-
-                if (msgQueueLimit > 0) {
-                    GridNioMessageTracker tracker = ses.meta(TRACKER_META);
-
-                    assert tracker != null : "Missing tracker for limited message queue: " + ses;
-
-                    tracker.run();
-                }
-            }
-        };
-
-    /** Logger. */
-    private IgniteLogger log;
-
-    /** Local process descriptor. */
-    private HadoopProcessDescriptor locProcDesc;
-
-    /** Marshaller. */
-    private Marshaller marsh;
-
-    /** Message notification executor service. */
-    private ExecutorService execSvc;
-
-    /** Grid name. */
-    private String gridName;
-
-    /** This node's resolved IP address. */
-    private volatile InetAddress locHost;
-
-    /** Local port which node uses. */
-    private int locPort = DFLT_PORT;
-
-    /** Local port range. */
-    private int locPortRange = DFLT_PORT_RANGE;
-
-    /** Local port which node uses to accept shared memory connections. */
-    private int shmemPort = -1;
-
-    /** Allocate direct buffer or heap buffer. */
-    private boolean directBuf = true;
-
-    /** Connect timeout. */
-    private long connTimeout = DFLT_CONN_TIMEOUT;
-
-    /** Maximum connect timeout. */
-    private long maxConnTimeout = DFLT_MAX_CONN_TIMEOUT;
-
-    /** Reconnect attempts count. */
-    @SuppressWarnings({"FieldAccessedSynchronizedAndUnsynchronized"})
-    private int reconCnt = DFLT_RECONNECT_CNT;
-
-    /** Socket send buffer. */
-    private int sockSndBuf;
-
-    /** Socket receive buffer. */
-    private int sockRcvBuf;
-
-    /** Message queue limit. */
-    private int msgQueueLimit = DFLT_MSG_QUEUE_LIMIT;
-
-    /** NIO server. */
-    private GridNioServer<HadoopMessage> nioSrvr;
-
-    /** Shared memory server. */
-    private IpcSharedMemoryServerEndpoint shmemSrv;
-
-    /** {@code TCP_NODELAY} option value for created sockets. */
-    private boolean tcpNoDelay = DFLT_TCP_NODELAY;
-
-    /** Shared memory accept worker. */
-    private ShmemAcceptWorker shmemAcceptWorker;
-
-    /** Shared memory workers. */
-    private final Collection<ShmemWorker> shmemWorkers = new ConcurrentLinkedDeque8<>();
-
-    /** Clients. */
-    private final ConcurrentMap<UUID, HadoopCommunicationClient> clients = GridConcurrentFactory.newMap();
-
-    /** Message listener. */
-    private volatile HadoopMessageListener lsnr;
-
-    /** Bound port. */
-    private int boundTcpPort = -1;
-
-    /** Bound port for shared memory server. */
-    private int boundTcpShmemPort = -1;
-
-    /** Count of selectors to use in TCP server. */
-    private int selectorsCnt = DFLT_SELECTORS_CNT;
-
-    /** Local process handshake message. */
-    private ProcessHandshakeMessage locIdMsg;
-
-    /** Locks. */
-    private final GridKeyLock locks = new GridKeyLock();
-
-    /**
-     * @param parentNodeId Parent node ID.
-     * @param procId Process ID.
-     * @param marsh Marshaller to use.
-     * @param log Logger.
-     * @param execSvc Executor service for message notification.
-     * @param gridName Grid name.
-     */
-    public HadoopExternalCommunication(
-        UUID parentNodeId,
-        UUID procId,
-        Marshaller marsh,
-        IgniteLogger log,
-        ExecutorService execSvc,
-        String gridName
-    ) {
-        locProcDesc = new HadoopProcessDescriptor(parentNodeId, procId);
-
-        this.marsh = marsh;
-        this.log = log.getLogger(HadoopExternalCommunication.class);
-        this.execSvc = execSvc;
-        this.gridName = gridName;
-    }
-
-    /**
-     * Sets local port for socket binding.
-     * <p>
-     * If not provided, default value is {@link #DFLT_PORT}.
-     *
-     * @param locPort Port number.
-     */
-    public void setLocalPort(int locPort) {
-        this.locPort = locPort;
-    }
-
-    /**
-     * Gets local port for socket binding.
-     *
-     * @return Local port.
-     */
-    public int getLocalPort() {
-        return locPort;
-    }
-
-    /**
-     * Sets local port range for local host ports (value must be greater than or equal to <tt>0</tt>).
-     * If the provided local port (see {@link #setLocalPort(int)}) is occupied,
-     * implementation will try to increment the port number for as long as it is less than
-     * initial value plus this range.
-     * <p>
-     * If port range value is <tt>0</tt>, then implementation will try to bind only to the port provided by
-     * {@link #setLocalPort(int)} method and fail if binding to this port did not succeed.
-     * <p>
-     * Local port range is very useful during development when more than one grid node needs to run
-     * on the same physical machine.
-     * <p>
-     * If not provided, default value is {@link #DFLT_PORT_RANGE}.
-     *
-     * @param locPortRange New local port range.
-     */
-    public void setLocalPortRange(int locPortRange) {
-        this.locPortRange = locPortRange;
-    }
-
-    /**
-     * @return Local port range.
-     */
-    public int getLocalPortRange() {
-        return locPortRange;
-    }
-
-    /**
-     * Sets local port to accept shared memory connections.
-     * <p>
-     * If set to {@code -1} shared memory communication will be disabled.
-     * <p>
-     * If not provided, shared memory is disabled.
-     *
-     * @param shmemPort Port number.
-     */
-    public void setSharedMemoryPort(int shmemPort) {
-        this.shmemPort = shmemPort;
-    }
-
-    /**
-     * Gets shared memory port to accept incoming connections.
-     *
-     * @return Shared memory port.
-     */
-    public int getSharedMemoryPort() {
-        return shmemPort;
-    }
-
-    /**
-     * Sets connect timeout used when establishing connection
-     * with remote nodes.
-     * <p>
-     * {@code 0} is interpreted as infinite timeout.
-     * <p>
-     * If not provided, default value is {@link #DFLT_CONN_TIMEOUT}.
-     *
-     * @param connTimeout Connect timeout.
-     */
-    public void setConnectTimeout(long connTimeout) {
-        this.connTimeout = connTimeout;
-    }
-
-    /**
-     * @return Connection timeout.
-     */
-    public long getConnectTimeout() {
-        return connTimeout;
-    }
-
-    /**
-     * Sets maximum connect timeout. If handshake is not established within connect timeout,
-     * then SPI tries to repeat handshake procedure with increased connect timeout.
-     * Connect timeout can grow up to the maximum timeout value;
-     * if the maximum timeout value is reached, the handshake is considered failed.
-     * <p>
-     * {@code 0} is interpreted as infinite timeout.
-     * <p>
-     * If not provided, default value is {@link #DFLT_MAX_CONN_TIMEOUT}.
-     *
-     * @param maxConnTimeout Maximum connect timeout.
-     */
-    public void setMaxConnectTimeout(long maxConnTimeout) {
-        this.maxConnTimeout = maxConnTimeout;
-    }
-
-    /**
-     * Gets maximum connection timeout.
-     *
-     * @return Maximum connection timeout.
-     */
-    public long getMaxConnectTimeout() {
-        return maxConnTimeout;
-    }
-
-    /**
-     * Sets maximum number of reconnect attempts used when establishing connection
-     * with remote nodes.
-     * <p>
-     * If not provided, default value is {@link #DFLT_RECONNECT_CNT}.
-     *
-     * @param reconCnt Maximum number of reconnection attempts.
-     */
-    public void setReconnectCount(int reconCnt) {
-        this.reconCnt = reconCnt;
-    }
-
-    /**
-     * @return Reconnect count.
-     */
-    public int getReconnectCount() {
-        return reconCnt;
-    }
-
-    /**
-     * Sets flag to allocate direct or heap buffer in SPI.
-     * If value is {@code true}, then SPI will use {@link ByteBuffer#allocateDirect(int)} call.
-     * Otherwise, SPI will use {@link ByteBuffer#allocate(int)} call.
-     * <p>
-     * If not provided, default value is {@code true}.
-     *
-     * @param directBuf Flag indicates to allocate direct or heap buffer in SPI.
-     */
-    public void setDirectBuffer(boolean directBuf) {
-        this.directBuf = directBuf;
-    }
-
-    /**
-     * @return Direct buffer flag.
-     */
-    public boolean isDirectBuffer() {
-        return directBuf;
-    }
-
-    /**
-     * Sets the count of selectors to be used in TCP server.
-     * <p/>
-     * If not provided, default value is {@link #DFLT_SELECTORS_CNT}.
-     *
-     * @param selectorsCnt Selectors count.
-     */
-    public void setSelectorsCount(int selectorsCnt) {
-        this.selectorsCnt = selectorsCnt;
-    }
-
-    /**
-     * @return Number of selectors to use.
-     */
-    public int getSelectorsCount() {
-        return selectorsCnt;
-    }
-
-    /**
-     * Sets value for {@code TCP_NODELAY} socket option. Each
-     * socket will be opened using provided value.
-     * <p>
-     * Setting this option to {@code true} disables Nagle's algorithm
-     * for the socket, decreasing latency and delivery time for small messages.
-     * <p>
-     * For systems that work under heavy network load it is advisable to
-     * set this value to {@code false}.
-     * <p>
-     * If not provided, default value is {@link #DFLT_TCP_NODELAY}.
-     *
-     * @param tcpNoDelay {@code True} to disable TCP delay.
-     */
-    public void setTcpNoDelay(boolean tcpNoDelay) {
-        this.tcpNoDelay = tcpNoDelay;
-    }
-
-    /**
-     * @return {@code TCP_NODELAY} flag.
-     */
-    public boolean isTcpNoDelay() {
-        return tcpNoDelay;
-    }
-
-    /**
-     * Sets receive buffer size for sockets created or accepted by this SPI.
-     * <p>
-     * If not provided, default is {@code 0} which leaves buffer unchanged after
-     * socket creation (OS defaults).
-     *
-     * @param sockRcvBuf Socket receive buffer size.
-     */
-    public void setSocketReceiveBuffer(int sockRcvBuf) {
-        this.sockRcvBuf = sockRcvBuf;
-    }
-
-    /**
-     * @return Socket receive buffer size.
-     */
-    public int getSocketReceiveBuffer() {
-        return sockRcvBuf;
-    }
-
-    /**
-     * Sets send buffer size for sockets created or accepted by this SPI.
-     * <p>
-     * If not provided, default is {@code 0} which leaves the buffer unchanged
-     * after socket creation (OS defaults).
-     *
-     * @param sockSndBuf Socket send buffer size.
-     */
-    public void setSocketSendBuffer(int sockSndBuf) {
-        this.sockSndBuf = sockSndBuf;
-    }
-
-    /**
-     * @return Socket send buffer size.
-     */
-    public int getSocketSendBuffer() {
-        return sockSndBuf;
-    }
-
-    /**
-     * Sets message queue limit for incoming and outgoing messages.
-     * <p>
-     * When set to a positive number, the send queue is limited to the configured value.
-     * {@code 0} disables the size limitations.
-     * <p>
-     * If not provided, default is {@link #DFLT_MSG_QUEUE_LIMIT}.
-     *
-     * @param msgQueueLimit Send queue size limit.
-     */
-    public void setMessageQueueLimit(int msgQueueLimit) {
-        this.msgQueueLimit = msgQueueLimit;
-    }
-
-    /**
-     * @return Message queue size limit.
-     */
-    public int getMessageQueueLimit() {
-        return msgQueueLimit;
-    }
-
-    /**
-     * Sets Hadoop communication message listener.
-     *
-     * @param lsnr Message listener.
-     */
-    public void setListener(HadoopMessageListener lsnr) {
-        this.lsnr = lsnr;
-    }
-
-    /**
-     * @return Outbound message queue size.
-     */
-    public int getOutboundMessagesQueueSize() {
-        return nioSrvr.outboundMessagesQueueSize();
-    }
-
-    /**
-     * Starts communication.
-     *
-     * @throws IgniteCheckedException If failed.
-     */
-    public void start() throws IgniteCheckedException {
-        try {
-            locHost = U.getLocalHost();
-        }
-        catch (IOException e) {
-            throw new IgniteCheckedException("Failed to initialize local address.", e);
-        }
-
-        try {
-            shmemSrv = resetShmemServer();
-        }
-        catch (IgniteCheckedException e) {
-            U.warn(log, "Failed to start shared memory communication server.", e);
-        }
-
-        try {
-            // This method potentially resets local port to the value
-            // local node was bound to.
-            nioSrvr = resetNioServer();
-        }
-        catch (IgniteCheckedException e) {
-            throw new IgniteCheckedException("Failed to initialize TCP server: " + locHost, e);
-        }
-
-        locProcDesc.address(locHost.getHostAddress());
-        locProcDesc.sharedMemoryPort(boundTcpShmemPort);
-        locProcDesc.tcpPort(boundTcpPort);
-
-        locIdMsg = new ProcessHandshakeMessage(locProcDesc);
-
-        if (shmemSrv != null) {
-            shmemAcceptWorker = new ShmemAcceptWorker(shmemSrv);
-
-            new IgniteThread(shmemAcceptWorker).start();
-        }
-
-        nioSrvr.start();
-    }
-
-    /**
-     * Gets local process descriptor.
-     *
-     * @return Local process descriptor.
-     */
-    public HadoopProcessDescriptor localProcessDescriptor() {
-        return locProcDesc;
-    }
-
-    /**
-     * Gets filters used by communication.
-     *
-     * @return Filters array.
-     */
-    private GridNioFilter[] filters() {
-        return new GridNioFilter[] {
-            new GridNioAsyncNotifyFilter(gridName, execSvc, log),
-            new HandshakeAndBackpressureFilter(),
-            new HadoopMarshallerFilter(marsh),
-            new GridNioCodecFilter(new GridBufferedParser(directBuf, ByteOrder.nativeOrder()), log, false)
-        };
-    }
-
-    /**
-     * Recreates the TCP NIO server instance.
-     *
-     * @return Server instance.
-     * @throws IgniteCheckedException Thrown if it's not possible to create server.
-     */
-    private GridNioServer<HadoopMessage> resetNioServer() throws IgniteCheckedException {
-        if (boundTcpPort >= 0)
-            throw new IgniteCheckedException("Tcp NIO server was already created on port " + boundTcpPort);
-
-        IgniteCheckedException lastEx = null;
-
-        // If configured TCP port is busy, find first available in range.
-        for (int port = locPort; port < locPort + locPortRange; port++) {
-            try {
-                GridNioServer<HadoopMessage> srvr =
-                    GridNioServer.<HadoopMessage>builder()
-                        .address(locHost)
-                        .port(port)
-                        .listener(srvLsnr)
-                        .logger(log.getLogger(GridNioServer.class))
-                        .selectorCount(selectorsCnt)
-                        .gridName(gridName)
-                        .tcpNoDelay(tcpNoDelay)
-                        .directBuffer(directBuf)
-                        .byteOrder(ByteOrder.nativeOrder())
-                        .socketSendBufferSize(sockSndBuf)
-                        .socketReceiveBufferSize(sockRcvBuf)
-                        .sendQueueLimit(msgQueueLimit)
-                        .directMode(false)
-                        .filters(filters())
-                        .build();
-
-                boundTcpPort = port;
-
-                // Ack port the TCP server was bound to.
-                if (log.isInfoEnabled())
-                    log.info("Successfully bound to TCP port [port=" + boundTcpPort +
-                        ", locHost=" + locHost + ']');
-
-                return srvr;
-            }
-            catch (IgniteCheckedException e) {
-                lastEx = e;
-
-                if (log.isDebugEnabled())
-                    log.debug("Failed to bind to local port (will try next port within range) [port=" + port +
-                        ", locHost=" + locHost + ']');
-            }
-        }
-
-        // If free port wasn't found.
-        throw new IgniteCheckedException("Failed to bind to any port within range [startPort=" + locPort +
-            ", portRange=" + locPortRange + ", locHost=" + locHost + ']', lastEx);
-    }
-
-    /**
-     * Creates new shared memory communication server.
-     * @return Server.
-     * @throws IgniteCheckedException If failed.
-     */
-    @Nullable private IpcSharedMemoryServerEndpoint resetShmemServer() throws IgniteCheckedException {
-        if (boundTcpShmemPort >= 0)
-            throw new IgniteCheckedException("Shared memory server was already created on port " + boundTcpShmemPort);
-
-        if (shmemPort == -1 || U.isWindows())
-            return null;
-
-        IgniteCheckedException lastEx = null;
-
-        // If configured TCP port is busy, find first available in range.
-        for (int port = shmemPort; port < shmemPort + locPortRange; port++) {
-            try {
-                IpcSharedMemoryServerEndpoint srv = new IpcSharedMemoryServerEndpoint(
-                    log.getLogger(IpcSharedMemoryServerEndpoint.class),
-                    locProcDesc.processId(), gridName);
-
-                srv.setPort(port);
-
-                srv.omitOutOfResourcesWarning(true);
-
-                srv.start();
-
-                boundTcpShmemPort = port;
-
-                // Ack port the shared memory server was bound to.
-                if (log.isInfoEnabled())
-                    log.info("Successfully bound shared memory communication to TCP port [port=" + boundTcpShmemPort +
-                        ", locHost=" + locHost + ']');
-
-                return srv;
-            }
-            catch (IgniteCheckedException e) {
-                lastEx = e;
-
-                if (log.isDebugEnabled())
-                    log.debug("Failed to bind to local port (will try next port within range) [port=" + port +
-                        ", locHost=" + locHost + ']');
-            }
-        }
-
-        // If free port wasn't found.
-        throw new IgniteCheckedException("Failed to bind shared memory communication to any port within range [startPort=" +
-            locPort + ", portRange=" + locPortRange + ", locHost=" + locHost + ']', lastEx);
-    }
-
-    /**
-     * Stops the server.
-     *
-     * @throws IgniteCheckedException If failed.
-     */
-    public void stop() throws IgniteCheckedException {
-        // Stop TCP server.
-        if (nioSrvr != null)
-            nioSrvr.stop();
-
-        U.cancel(shmemAcceptWorker);
-        U.join(shmemAcceptWorker, log);
-
-        U.cancel(shmemWorkers);
-        U.join(shmemWorkers, log);
-
-        shmemWorkers.clear();
-
-        // Force closing on stop (safety).
-        for (HadoopCommunicationClient client : clients.values())
-            client.forceClose();
-
-        // Clear resources.
-        nioSrvr = null;
-
-        boundTcpPort = -1;
-    }
-
-    /**
-     * Sends message to Hadoop process.
-     *
-     * @param desc Target process descriptor.
-     * @param msg Message to send.
-     * @throws IgniteCheckedException If failed.
-     */
-    public void sendMessage(HadoopProcessDescriptor desc, HadoopMessage msg) throws
-        IgniteCheckedException {
-        assert desc != null;
-        assert msg != null;
-
-        if (log.isTraceEnabled())
-            log.trace("Sending message to Hadoop process [desc=" + desc + ", msg=" + msg + ']');
-
-        HadoopCommunicationClient client = null;
-
-        boolean closeOnRelease = true;
-
-        try {
-            client = reserveClient(desc);
-
-            client.sendMessage(desc, msg);
-
-            closeOnRelease = false;
-        }
-        finally {
-            if (client != null) {
-                if (closeOnRelease) {
-                    client.forceClose();
-
-                    clients.remove(desc.processId(), client);
-                }
-                else
-                    client.release();
-            }
-        }
-    }
-
-    /**
-     * Returns existing or just created client to node.
-     *
-     * @param desc Process to which a client should be opened.
-     * @return The existing or just created client.
-     * @throws IgniteCheckedException Thrown if any exception occurs.
-     */
-    private HadoopCommunicationClient reserveClient(HadoopProcessDescriptor desc) throws IgniteCheckedException {
-        assert desc != null;
-
-        UUID procId = desc.processId();
-
-        while (true) {
-            HadoopCommunicationClient client = clients.get(procId);
-
-            if (client == null) {
-                if (log.isDebugEnabled())
-                    log.debug("Did not find client for remote process [locProcDesc=" + locProcDesc + ", desc=" +
-                        desc + ']');
-
-                // Do not allow concurrent connects.
-                Object sync = locks.lock(procId);
-
-                try {
-                    client = clients.get(procId);
-
-                    if (client == null) {
-                        HadoopCommunicationClient old = clients.put(procId, client = createNioClient(desc));
-
-                        assert old == null;
-                    }
-                }
-                finally {
-                    locks.unlock(procId, sync);
-                }
-
-                assert client != null;
-            }
-
-            if (client.reserve())
-                return client;
-            else
-                // Client has just been closed by idle worker. Help it and try again.
-                clients.remove(procId, client);
-        }
-    }
-
-    /**
-     * @param desc Process descriptor.
-     * @return Client.
-     * @throws IgniteCheckedException If failed.
-     */
-    @Nullable protected HadoopCommunicationClient createNioClient(HadoopProcessDescriptor desc)
-        throws IgniteCheckedException {
-        assert desc != null;
-
-        int shmemPort = desc.sharedMemoryPort();
-
-        // If the remote process has shared memory server enabled and shares our parent node,
-        // then we are likely to run on the same host and shared memory communication can be tried.
-        if (shmemPort != -1 && locProcDesc.parentNodeId().equals(desc.parentNodeId())) {
-            try {
-                return createShmemClient(desc, shmemPort);
-            }
-            catch (IgniteCheckedException e) {
-                if (e.hasCause(IpcOutOfSystemResourcesException.class))
-                    // Has cause or is itself the IpcOutOfSystemResourcesException.
-                    LT.warn(log, null, OUT_OF_RESOURCES_TCP_MSG);
-                else if (log.isDebugEnabled())
-                    log.debug("Failed to establish shared memory connection with local hadoop process: " +
-                        desc);
-            }
-        }
-
-        return createTcpClient(desc);
-    }
-
-    /**
-     * @param desc Process descriptor.
-     * @param port Port.
-     * @return Client.
-     * @throws IgniteCheckedException If failed.
-     */
-    @Nullable protected HadoopCommunicationClient createShmemClient(HadoopProcessDescriptor desc, int port)
-        throws IgniteCheckedException {
-        int attempt = 1;
-
-        int connectAttempts = 1;
-
-        long connTimeout0 = connTimeout;
-
-        while (true) {
-            IpcEndpoint clientEndpoint;
-
-            try {
-                clientEndpoint = new IpcSharedMemoryClientEndpoint(port, (int)connTimeout, log);
-            }
-            catch (IgniteCheckedException e) {
-                // Reconnect for the second time, if connection is not established.
-                if (connectAttempts < 2 && X.hasCause(e, ConnectException.class)) {
-                    connectAttempts++;
-
-                    continue;
-                }
-
-                throw e;
-            }
-
-            HadoopCommunicationClient client = null;
-
-            try {
-                ShmemWorker worker = new ShmemWorker(clientEndpoint, false);
-
-                shmemWorkers.add(worker);
-
-                GridNioSession ses = worker.session();
-
-                HandshakeFinish fin = new HandshakeFinish();
-
-                // We hold the lock, so it is safe to get the session and attach the handshake meta.
-                ses.addMeta(HANDSHAKE_FINISH_META, fin);
-
-                client = new HadoopTcpNioCommunicationClient(ses);
-
-                new IgniteThread(worker).start();
-
-                fin.await(connTimeout0);
-            }
-            catch (HadoopHandshakeTimeoutException e) {
-                if (log.isDebugEnabled())
-                    log.debug("Handshake timed out (will retry with increased timeout) [timeout=" + connTimeout0 +
-                        ", err=" + e.getMessage() + ", client=" + client + ']');
-
-                if (client != null)
-                    client.forceClose();
-
-                if (attempt == reconCnt || connTimeout0 > maxConnTimeout) {
-                    if (log.isDebugEnabled())
-                        log.debug("Handshake timedout (will stop attempts to perform the handshake) " +
-                            "[timeout=" + connTimeout0 + ", maxConnTimeout=" + maxConnTimeout +
-                            ", attempt=" + attempt + ", reconCnt=" + reconCnt +
-                            ", err=" + e.getMessage() + ", client=" + client + ']');
-
-                    throw e;
-                }
-                else {
-                    attempt++;
-
-                    connTimeout0 *= 2;
-
-                    continue;
-                }
-            }
-            catch (RuntimeException | Error e) {
-                if (log.isDebugEnabled())
-                    log.debug(
-                        "Caught exception (will close client) [err=" + e.getMessage() + ", client=" + client + ']');
-
-                if (client != null)
-                    client.forceClose();
-
-                throw e;
-            }
-
-            return client;
-        }
-    }
-
-    /**
-     * Establishes TCP connection to remote Hadoop process and returns client.
-     *
-     * @param desc Process descriptor.
-     * @return Client.
-     * @throws IgniteCheckedException If failed.
-     */
-    protected HadoopCommunicationClient createTcpClient(HadoopProcessDescriptor desc) throws IgniteCheckedException {
-        String addr = desc.address();
-
-        int port = desc.tcpPort();
-
-        if (log.isDebugEnabled())
-            log.debug("Trying to connect to remote process [locProcDesc=" + locProcDesc + ", desc=" + desc + ']');
-
-        boolean conn = false;
-        HadoopTcpNioCommunicationClient client = null;
-        IgniteCheckedException errs = null;
-
-        int connectAttempts = 1;
-
-        long connTimeout0 = connTimeout;
-
-        int attempt = 1;
-
-        while (!conn) { // Reconnection on handshake timeout.
-            try {
-                SocketChannel ch = SocketChannel.open();
-
-                ch.configureBlocking(true);
-
-                ch.socket().setTcpNoDelay(tcpNoDelay);
-                ch.socket().setKeepAlive(true);
-
-                if (sockRcvBuf > 0)
-                    ch.socket().setReceiveBufferSize(sockRcvBuf);
-
-                if (sockSndBuf > 0)
-                    ch.socket().setSendBufferSize(sockSndBuf);
-
-                ch.socket().connect(new InetSocketAddress(addr, port), (int)connTimeout);
-
-                HandshakeFinish fin = new HandshakeFinish();
-
-                GridNioSession ses = nioSrvr.createSession(ch, F.asMap(HANDSHAKE_FINISH_META, fin)).get();
-
-                client = new HadoopTcpNioCommunicationClient(ses);
-
-                if (log.isDebugEnabled())
-                    log.debug("Waiting for handshake finish for client: " + client);
-
-                fin.await(connTimeout0);
-
-                conn = true;
-            }
-            catch (HadoopHandshakeTimeoutException e) {
-                if (client != null) {
-                    client.forceClose();
-
-                    client = null;
-                }
-
-                if (log.isDebugEnabled())
-                    log.debug(
-                        "Handshake timedout (will retry with increased timeout) [timeout=" + connTimeout0 +
-                            ", desc=" + desc + ", port=" + port + ", err=" + e + ']');
-
-                if (attempt == reconCnt || connTimeout0 > maxConnTimeout) {
-                    if (log.isDebugEnabled())
-                        log.debug("Handshake timed out (will stop attempts to perform the handshake) " +
-                            "[timeout=" + connTimeout0 + ", maxConnTimeout=" + maxConnTimeout +
-                            ", attempt=" + attempt + ", reconCnt=" + reconCnt +
-                            ", err=" + e.getMessage() + ", addr=" + addr + ']');
-
-                    if (errs == null)
-                        errs = new IgniteCheckedException("Failed to connect to remote Hadoop process " +
-                            "(is process still running?) [desc=" + desc + ", addrs=" + addr + ']');
-
-                    errs.addSuppressed(e);
-
-                    break;
-                }
-                else {
-                    attempt++;
-
-                    connTimeout0 *= 2;
-
-                    // Continue loop.
-                }
-            }
-            catch (Exception e) {
-                if (client != null) {
-                    client.forceClose();
-
-                    client = null;
-                }
-
-                if (log.isDebugEnabled())
-                    log.debug("Client creation failed [addr=" + addr + ", port=" + port +
-                        ", err=" + e + ']');
-
-                if (X.hasCause(e, SocketTimeoutException.class))
-                    LT.warn(log, null, "Connect timed out (consider increasing 'connTimeout' " +
-                        "configuration property) [addr=" + addr + ", port=" + port + ']');
-
-                if (errs == null)
-                    errs = new IgniteCheckedException("Failed to connect to remote Hadoop process (is process still running?) " +
-                        "[desc=" + desc + ", addrs=" + addr + ']');
-
-                errs.addSuppressed(e);
-
-                // Reconnect for the second time, if connection is not established.
-                if (connectAttempts < 2 &&
-                    (e instanceof ConnectException || X.hasCause(e, ConnectException.class))) {
-                    connectAttempts++;
-
-                    continue;
-                }
-
-                break;
-            }
-        }
-
-        if (client == null) {
-            assert errs != null;
-
-            if (X.hasCause(errs, ConnectException.class))
-                LT.warn(log, null, "Failed to connect to a remote Hadoop process (is process still running?). " +
-                    "Make sure operating system firewall is disabled on local and remote host) " +
-                    "[addrs=" + addr + ", port=" + port + ']');
-
-            throw errs;
-        }
-
-        if (log.isDebugEnabled())
-            log.debug("Created client: " + client);
-
-        return client;
-    }
-
-    /**
-     * @param desc Sender process descriptor.
-     * @param msg Communication message.
-     */
-    protected void notifyListener(HadoopProcessDescriptor desc, HadoopMessage msg) {
-        HadoopMessageListener lsnr = this.lsnr;
-
-        if (lsnr != null)
-            // Notify listener of a new message.
-            lsnr.onMessageReceived(desc, msg);
-        else if (log.isDebugEnabled())
-            log.debug("Received communication message without any registered listeners (will ignore) " +
-                "[senderProcDesc=" + desc + ", msg=" + msg + ']');
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopExternalCommunication.class, this);
-    }
-
-    /**
-     * This worker is responsible for shutting the server down when stopping;
-     * no other thread shall stop the passed server.
-     */
-    private class ShmemAcceptWorker extends GridWorker {
-        /** */
-        private final IpcSharedMemoryServerEndpoint srv;
-
-        /**
-         * @param srv Server.
-         */
-        ShmemAcceptWorker(IpcSharedMemoryServerEndpoint srv) {
-            super(gridName, "shmem-communication-acceptor", HadoopExternalCommunication.this.log);
-
-            this.srv = srv;
-        }
-
-        /** {@inheritDoc} */
-        @Override protected void body() throws InterruptedException {
-            try {
-                while (!Thread.interrupted()) {
-                    ShmemWorker e = new ShmemWorker(srv.accept(), true);
-
-                    shmemWorkers.add(e);
-
-                    new IgniteThread(e).start();
-                }
-            }
-            catch (IgniteCheckedException e) {
-                if (!isCancelled())
-                    U.error(log, "Shmem server failed.", e);
-            }
-            finally {
-                srv.close();
-            }
-        }
-
-        /** {@inheritDoc} */
-        @Override public void cancel() {
-            super.cancel();
-
-            srv.close();
-        }
-    }
-
-    /**
-     * Shared memory worker that serves a single IPC endpoint.
-     */
-    private class ShmemWorker extends GridWorker {
-        /** */
-        private final IpcEndpoint endpoint;
-
-        /** Adapter. */
-        private HadoopIpcToNioAdapter<HadoopMessage> adapter;
-
-        /**
-         * @param endpoint Endpoint.
-         * @param accepted {@code True} if the endpoint was accepted on the server side.
-         */
-        private ShmemWorker(IpcEndpoint endpoint, boolean accepted) {
-            super(gridName, "shmem-worker", HadoopExternalCommunication.this.log);
-
-            this.endpoint = endpoint;
-
-            adapter = new HadoopIpcToNioAdapter<>(
-                HadoopExternalCommunication.this.log,
-                endpoint,
-                accepted,
-                srvLsnr,
-                filters());
-        }
-
-        /** {@inheritDoc} */
-        @Override protected void body() throws InterruptedException {
-            try {
-                adapter.serve();
-            }
-            finally {
-                shmemWorkers.remove(this);
-
-                endpoint.close();
-            }
-        }
-
-        /** {@inheritDoc} */
-        @Override public void cancel() {
-            super.cancel();
-
-            endpoint.close();
-        }
-
-        /** {@inheritDoc} */
-        @Override protected void cleanup() {
-            super.cleanup();
-
-            endpoint.close();
-        }
-
-        /** {@inheritDoc} */
-        @Override public String toString() {
-            return S.toString(ShmemWorker.class, this);
-        }
-
-        /**
-         * @return NIO session for this worker.
-         */
-        public GridNioSession session() {
-            return adapter.session();
-        }
-    }
-
-    /**
-     * Handshake finish latch.
-     */
-    private static class HandshakeFinish {
-        /** Await latch. */
-        private CountDownLatch latch = new CountDownLatch(1);
-
-        /**
-         * Finishes handshake.
-         */
-        public void finish() {
-            latch.countDown();
-        }
-
-        /**
-         * @param time Time to wait.
-         * @throws HadoopHandshakeTimeoutException If failed to wait.
-         */
-        public void await(long time) throws HadoopHandshakeTimeoutException {
-            try {
-                if (!latch.await(time, TimeUnit.MILLISECONDS))
-                    throw new HadoopHandshakeTimeoutException("Failed to wait for handshake to finish [timeout=" +
-                        time + ']');
-            }
-            catch (InterruptedException e) {
-                Thread.currentThread().interrupt();
-
-                throw new HadoopHandshakeTimeoutException("Failed to wait for handshake to finish (thread was " +
-                    "interrupted) [timeout=" + time + ']', e);
-            }
-        }
-    }
-
-    /**
-     * Filter that performs the handshake and applies message queue backpressure.
-     */
-    private class HandshakeAndBackpressureFilter extends GridNioFilterAdapter {
-        /**
-         * Constructor. Assigns the filter name.
-         */
-        protected HandshakeAndBackpressureFilter() {
-            super("HadoopHandshakeFilter");
-        }
-
-        /** {@inheritDoc} */
-        @Override public void onSessionOpened(final GridNioSession ses) throws IgniteCheckedException {
-            if (ses.accepted()) {
-                if (log.isDebugEnabled())
-                    log.debug("Accepted connection, initiating handshake: " + ses);
-
-                // Server initiates handshake.
-                ses.send(locIdMsg).listen(new CI1<IgniteInternalFuture<?>>() {
-                    @Override public void apply(IgniteInternalFuture<?> fut) {
-                        try {
-                            // Make sure there were no errors.
-                            fut.get();
-                        }
-                        catch (IgniteCheckedException e) {
-                            log.warning("Failed to send handshake message, will close session: " + ses, e);
-
-                            ses.close();
-                        }
-                    }
-                });
-            }
-        }
-
-        /** {@inheritDoc} */
-        @Override public void onSessionClosed(GridNioSession ses) throws IgniteCheckedException {
-            proceedSessionClosed(ses);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void onExceptionCaught(GridNioSession ses, IgniteCheckedException ex) throws IgniteCheckedException {
-            proceedExceptionCaught(ses, ex);
-        }
-
-        /** {@inheritDoc} */
-        @Override public GridNioFuture<?> onSessionWrite(GridNioSession ses, Object msg) throws IgniteCheckedException {
-            if (ses.meta(PROCESS_META) == null && !(msg instanceof ProcessHandshakeMessage))
-                log.warning("Writing message before handshake has finished [ses=" + ses + ", msg=" + msg + ']');
-
-            return proceedSessionWrite(ses, msg);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void onMessageReceived(GridNioSession ses, Object msg) throws IgniteCheckedException {
-            HadoopProcessDescriptor desc = ses.meta(PROCESS_META);
-
-            UUID rmtProcId = desc == null ? null : desc.processId();
-
-            if (rmtProcId == null) {
-                if (!(msg instanceof ProcessHandshakeMessage)) {
-                    log.warning("Invalid handshake message received, will close connection [ses=" + ses +
-                        ", msg=" + msg + ']');
-
-                    ses.close();
-
-                    return;
-                }
-
-                ProcessHandshakeMessage nId = (ProcessHandshakeMessage)msg;
-
-                if (log.isDebugEnabled())
-                    log.debug("Received handshake message [ses=" + ses + ", msg=" + msg + ']');
-
-                ses.addMeta(PROCESS_META, nId.processDescriptor());
-
-                if (!ses.accepted())
-                    // Send handshake reply.
-                    ses.send(locIdMsg);
-                else {
-                    // Handshake reply received on the accepted side.
-                    rmtProcId = nId.processDescriptor().processId();
-
-                    if (log.isDebugEnabled())
-                        log.debug("Finished handshake with remote client: " + ses);
-
-                    Object sync = locks.tryLock(rmtProcId);
-
-                    if (sync != null) {
-                        try {
-                            if (clients.get(rmtProcId) == null) {
-                                if (log.isDebugEnabled())
-                                    log.debug("Will reuse session for descriptor: " + rmtProcId);
-
-                                // Handshake finished flag is true.
-                                clients.put(rmtProcId, new HadoopTcpNioCommunicationClient(ses));
-                            }
-                            else {
-                                if (log.isDebugEnabled())
-                                    log.debug("Will not reuse client as another already exists [locProcDesc=" +
-                                        locProcDesc + ", desc=" + desc + ']');
-                            }
-                        }
-                        finally {
-                            locks.unlock(rmtProcId, sync);
-                        }
-                    }
-                    else {
-                        if (log.isDebugEnabled())
-                            log.debug("Concurrent connection is being established, will not reuse client session [" +
-                                "locProcDesc=" + locProcDesc + ", desc=" + desc + ']');
-                    }
-                }
-
-                if (log.isDebugEnabled())
-                    log.debug("Handshake is finished for session [ses=" + ses + ", locProcDesc=" + locProcDesc + ']');
-
-                HandshakeFinish to = ses.meta(HANDSHAKE_FINISH_META);
-
-                if (to != null)
-                    to.finish();
-
-                // Notify session opened (both parties).
-                proceedSessionOpened(ses);
-            }
-            else {
-                if (msgQueueLimit > 0) {
-                    GridNioMessageTracker tracker = ses.meta(TRACKER_META);
-
-                    if (tracker == null) {
-                        GridNioMessageTracker old = ses.addMeta(TRACKER_META, tracker =
-                            new GridNioMessageTracker(ses, msgQueueLimit));
-
-                        assert old == null;
-                    }
-
-                    tracker.onMessageReceived();
-                }
-
-                proceedMessageReceived(ses, msg);
-            }
-        }
-
-        /** {@inheritDoc} */
-        @Override public GridNioFuture<Boolean> onSessionClose(GridNioSession ses) throws IgniteCheckedException {
-            return proceedSessionClose(ses);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void onSessionIdleTimeout(GridNioSession ses) throws IgniteCheckedException {
-            proceedSessionIdleTimeout(ses);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void onSessionWriteTimeout(GridNioSession ses) throws IgniteCheckedException {
-            proceedSessionWriteTimeout(ses);
-        }
-    }
-
-    /**
-     * Process handshake message carrying the process descriptor.
-     */
-    @SuppressWarnings("PublicInnerClass")
-    public static class ProcessHandshakeMessage implements HadoopMessage {
-        /** */
-        private static final long serialVersionUID = 0L;
-
-        /** Process descriptor. */
-        private HadoopProcessDescriptor procDesc;
-
-        /** */
-        public ProcessHandshakeMessage() {
-            // No-op.
-        }
-
-        /**
-         * @param procDesc Process descriptor.
-         */
-        private ProcessHandshakeMessage(HadoopProcessDescriptor procDesc) {
-            this.procDesc = procDesc;
-        }
-
-        /**
-         * @return Process descriptor.
-         */
-        public HadoopProcessDescriptor processDescriptor() {
-            return procDesc;
-        }
-
-        /** {@inheritDoc} */
-        @Override public void writeExternal(ObjectOutput out) throws IOException {
-            out.writeObject(procDesc);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-            procDesc = (HadoopProcessDescriptor)in.readObject();
-        }
-
-        /** {@inheritDoc} */
-        @Override public String toString() {
-            return S.toString(ProcessHandshakeMessage.class, this);
-        }
-    }
-}
\ No newline at end of file
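
Both createShmemClient() and createTcpClient() above apply the same retry discipline on handshake timeout: give up once the attempt counter reaches reconCnt or the timeout exceeds maxConnTimeout, otherwise double the timeout and retry. A minimal, self-contained sketch of that pattern, assuming an illustrative connectAndHandshake() helper that is not part of the module:

    import java.util.concurrent.TimeoutException;

    public class HandshakeRetrySketch {
        /** Hypothetical stand-in for the connect + handshake step. */
        static void connectAndHandshake(long timeoutMs) throws TimeoutException {
            if (timeoutMs < 4_000)
                throw new TimeoutException("Handshake timed out [timeout=" + timeoutMs + ']');
        }

        public static void main(String[] args) throws TimeoutException {
            long timeout = 1_000;           // Initial handshake timeout (cf. connTimeout).
            final long maxTimeout = 10_000; // Upper bound (cf. maxConnTimeout).
            final int reconCnt = 5;         // Maximum number of attempts.

            for (int attempt = 1; ; attempt++) {
                try {
                    connectAndHandshake(timeout);

                    break; // Handshake finished successfully.
                }
                catch (TimeoutException e) {
                    if (attempt == reconCnt || timeout > maxTimeout)
                        throw e; // Out of attempts: propagate the last failure.

                    timeout *= 2; // Double the timeout before the next attempt.
                }
            }
        }
    }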

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopHandshakeTimeoutException.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopHandshakeTimeoutException.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopHandshakeTimeoutException.java
deleted file mode 100644
index b2a85e1..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopHandshakeTimeoutException.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication;
-
-import org.apache.ignite.IgniteCheckedException;
-import org.jetbrains.annotations.Nullable;
-
-/** Internal exception class for proper timeout handling. */
-class HadoopHandshakeTimeoutException extends IgniteCheckedException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * @param msg Message.
-     */
-    HadoopHandshakeTimeoutException(String msg) {
-        super(msg);
-    }
-
-    /**
-     * @param msg Message.
-     * @param cause Cause.
-     */
-    HadoopHandshakeTimeoutException(String msg, @Nullable Throwable cause) {
-        super(msg, cause);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopIpcToNioAdapter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopIpcToNioAdapter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopIpcToNioAdapter.java
deleted file mode 100644
index a8de999..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopIpcToNioAdapter.java
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.atomic.AtomicReference;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteLogger;
-import org.apache.ignite.internal.util.ipc.IpcEndpoint;
-import org.apache.ignite.internal.util.nio.GridNioFilter;
-import org.apache.ignite.internal.util.nio.GridNioFilterAdapter;
-import org.apache.ignite.internal.util.nio.GridNioFilterChain;
-import org.apache.ignite.internal.util.nio.GridNioFinishedFuture;
-import org.apache.ignite.internal.util.nio.GridNioFuture;
-import org.apache.ignite.internal.util.nio.GridNioServerListener;
-import org.apache.ignite.internal.util.nio.GridNioSession;
-import org.apache.ignite.internal.util.nio.GridNioSessionImpl;
-
-/**
- * Allows re-using existing {@link GridNioFilter}s on IPC (specifically shared memory IPC)
- * communications.
- *
- * Note that this class consumes an entire thread inside the {@link #serve()} method
- * in order to serve one {@link org.apache.ignite.internal.util.ipc.IpcEndpoint}.
- */
-public class HadoopIpcToNioAdapter<T> {
-    /** */
-    private final IpcEndpoint endp;
-
-    /** */
-    private final GridNioFilterChain<T> chain;
-
-    /** */
-    private final GridNioSessionImpl ses;
-
-    /** */
-    private final AtomicReference<CountDownLatch> latchRef = new AtomicReference<>();
-
-    /** */
-    private final ByteBuffer writeBuf;
-
-    /**
-     * @param log Log.
-     * @param endp Endpoint.
-     * @param accepted {@code True} if the endpoint was accepted on the server side.
-     * @param lsnr Listener.
-     * @param filters Filters.
-     */
-    public HadoopIpcToNioAdapter(IgniteLogger log, IpcEndpoint endp, boolean accepted,
-        GridNioServerListener<T> lsnr, GridNioFilter... filters) {
-        this.endp = endp;
-
-        chain = new GridNioFilterChain<>(log, lsnr, new HeadFilter(), filters);
-        ses = new GridNioSessionImpl(chain, null, null, accepted);
-
-        writeBuf = ByteBuffer.allocate(8 << 10);
-
-        writeBuf.order(ByteOrder.nativeOrder());
-    }
-
-    /**
-     * Serves the given set of listeners, repeatedly reading data from the endpoint.
-     *
-     * @throws InterruptedException If interrupted.
-     */
-    public void serve() throws InterruptedException {
-        try {
-            chain.onSessionOpened(ses);
-
-            InputStream in = endp.inputStream();
-
-            ByteBuffer readBuf = ByteBuffer.allocate(8 << 10);
-
-            readBuf.order(ByteOrder.nativeOrder());
-
-            assert readBuf.hasArray();
-
-            while (!Thread.interrupted()) {
-                int pos = readBuf.position();
-
-                int read = in.read(readBuf.array(), pos, readBuf.remaining());
-
-                if (read > 0) {
-                    readBuf.position(0);
-                    readBuf.limit(pos + read);
-
-                    chain.onMessageReceived(ses, readBuf);
-
-                    if (readBuf.hasRemaining())
-                        readBuf.compact();
-                    else
-                        readBuf.clear();
-
-                    CountDownLatch latch = latchRef.get();
-
-                    if (latch != null)
-                        latch.await();
-                }
-                else if (read < 0) {
-                    endp.close();
-
-                    break; // And close below.
-                }
-            }
-
-            // Assuming remote end closed connection - pushing event from head to tail.
-            chain.onSessionClosed(ses);
-        }
-        catch (Exception e) {
-            chain.onExceptionCaught(ses, new IgniteCheckedException("Failed to read from IPC endpoint.", e));
-        }
-    }
-
-    /**
-     * Gets dummy session for this adapter.
-     *
-     * @return Session.
-     */
-    public GridNioSession session() {
-        return ses;
-    }
-
-    /**
-     * Handles write events on chain.
-     *
-     * @param msg Buffer to send.
-     * @return Send result.
-     */
-    private GridNioFuture<?> send(ByteBuffer msg) {
-        assert writeBuf.hasArray();
-
-        try {
-            while (msg.hasRemaining()) {
-                writeBuf.clear();
-
-                writeBuf.put(msg);
-
-                endp.outputStream().write(writeBuf.array(), 0, writeBuf.position());
-            }
-        }
-        catch (IOException | IgniteCheckedException e) {
-            return new GridNioFinishedFuture<Object>(e);
-        }
-
-        return new GridNioFinishedFuture<>((Object)null);
-    }
-
-    /**
-     * Filter forwarding messages from chain's head to this server.
-     */
-    private class HeadFilter extends GridNioFilterAdapter {
-        /**
-         * Assigns filter name.
-         */
-        protected HeadFilter() {
-            super("HeadFilter");
-        }
-
-        /** {@inheritDoc} */
-        @Override public void onSessionOpened(GridNioSession ses) throws IgniteCheckedException {
-            proceedSessionOpened(ses);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void onSessionClosed(GridNioSession ses) throws IgniteCheckedException {
-            proceedSessionClosed(ses);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void onExceptionCaught(GridNioSession ses, IgniteCheckedException ex) throws IgniteCheckedException {
-            proceedExceptionCaught(ses, ex);
-        }
-
-        /** {@inheritDoc} */
-        @Override public GridNioFuture<?> onSessionWrite(GridNioSession ses, Object msg) {
-            assert ses == HadoopIpcToNioAdapter.this.ses : "ses=" + ses +
-                ", this.ses=" + HadoopIpcToNioAdapter.this.ses;
-
-            return send((ByteBuffer)msg);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void onMessageReceived(GridNioSession ses, Object msg) throws IgniteCheckedException {
-            proceedMessageReceived(ses, msg);
-        }
-
-        /** {@inheritDoc} */
-        @Override public GridNioFuture<?> onPauseReads(GridNioSession ses) throws IgniteCheckedException {
-            // This call should be synced externally to avoid races.
-            boolean b = latchRef.compareAndSet(null, new CountDownLatch(1));
-
-            assert b;
-
-            return new GridNioFinishedFuture<>(b);
-        }
-
-        /** {@inheritDoc} */
-        @Override public GridNioFuture<?> onResumeReads(GridNioSession ses) throws IgniteCheckedException {
-            // This call should be synced externally to avoid races.
-            CountDownLatch latch = latchRef.getAndSet(null);
-
-            if (latch != null)
-                latch.countDown();
-
-            return new GridNioFinishedFuture<Object>(latch != null);
-        }
-
-        /** {@inheritDoc} */
-        @Override public GridNioFuture<Boolean> onSessionClose(GridNioSession ses) {
-            assert ses == HadoopIpcToNioAdapter.this.ses;
-
-            boolean closed = HadoopIpcToNioAdapter.this.ses.setClosed();
-
-            if (closed)
-                endp.close();
-
-            return new GridNioFinishedFuture<>(closed);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void onSessionIdleTimeout(GridNioSession ses) throws IgniteCheckedException {
-            proceedSessionIdleTimeout(ses);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void onSessionWriteTimeout(GridNioSession ses) throws IgniteCheckedException {
-            proceedSessionWriteTimeout(ses);
-        }
-    }
-}
\ No newline at end of file
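
The HeadFilter above implements read backpressure with an AtomicReference holding a CountDownLatch: onPauseReads() installs a latch that the serve() loop blocks on after delivering a batch, and onResumeReads() releases it. The same mechanism in isolation (class and method names are illustrative):

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.atomic.AtomicReference;

    public class PauseResumeSketch {
        private final AtomicReference<CountDownLatch> latchRef = new AtomicReference<>();

        /** Reader side: blocks after a batch if reads are currently paused. */
        void maybeBlock() throws InterruptedException {
            CountDownLatch latch = latchRef.get();

            if (latch != null)
                latch.await(); // Paused: wait until resumeReads() releases us.
        }

        /** Pauses reads; like onPauseReads(), must be synced externally. */
        void pauseReads() {
            boolean set = latchRef.compareAndSet(null, new CountDownLatch(1));

            assert set : "Reads already paused.";
        }

        /** Resumes reads; like onResumeReads(). */
        void resumeReads() {
            CountDownLatch latch = latchRef.getAndSet(null);

            if (latch != null)
                latch.countDown(); // Release the blocked reader thread.
        }

        public static void main(String[] args) throws InterruptedException {
            final PauseResumeSketch sketch = new PauseResumeSketch();

            sketch.pauseReads();

            Thread reader = new Thread(new Runnable() {
                @Override public void run() {
                    try {
                        sketch.maybeBlock(); // Blocks until resumeReads() below.

                        System.out.println("Reader resumed.");
                    }
                    catch (InterruptedException ignored) {
                        // No-op.
                    }
                }
            });

            reader.start();

            Thread.sleep(100);

            sketch.resumeReads();

            reader.join();
        }
    }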

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMarshallerFilter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMarshallerFilter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMarshallerFilter.java
deleted file mode 100644
index 3f79469..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMarshallerFilter.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication;
-
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
-import org.apache.ignite.internal.util.nio.GridNioFilterAdapter;
-import org.apache.ignite.internal.util.nio.GridNioFuture;
-import org.apache.ignite.internal.util.nio.GridNioSession;
-import org.apache.ignite.marshaller.Marshaller;
-
-/**
- * Serialization filter.
- */
-public class HadoopMarshallerFilter extends GridNioFilterAdapter {
-    /** Marshaller. */
-    private Marshaller marshaller;
-
-    /**
-     * @param marshaller Marshaller to use.
-     */
-    public HadoopMarshallerFilter(Marshaller marshaller) {
-        super("HadoopMarshallerFilter");
-
-        this.marshaller = marshaller;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onSessionOpened(GridNioSession ses) throws IgniteCheckedException {
-        proceedSessionOpened(ses);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onSessionClosed(GridNioSession ses) throws IgniteCheckedException {
-        proceedSessionClosed(ses);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onExceptionCaught(GridNioSession ses, IgniteCheckedException ex) throws IgniteCheckedException {
-        proceedExceptionCaught(ses, ex);
-    }
-
-    /** {@inheritDoc} */
-    @Override public GridNioFuture<?> onSessionWrite(GridNioSession ses, Object msg) throws IgniteCheckedException {
-        assert msg instanceof HadoopMessage : "Invalid message type: " + msg;
-
-        return proceedSessionWrite(ses, marshaller.marshal(msg));
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onMessageReceived(GridNioSession ses, Object msg) throws IgniteCheckedException {
-        assert msg instanceof byte[];
-
-        // Always unmarshal with system classloader.
-        proceedMessageReceived(ses, marshaller.unmarshal((byte[])msg, null));
-    }
-
-    /** {@inheritDoc} */
-    @Override public GridNioFuture<Boolean> onSessionClose(GridNioSession ses) throws IgniteCheckedException {
-        return proceedSessionClose(ses);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onSessionIdleTimeout(GridNioSession ses) throws IgniteCheckedException {
-        proceedSessionIdleTimeout(ses);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onSessionWriteTimeout(GridNioSession ses) throws IgniteCheckedException {
-        proceedSessionWriteTimeout(ses);
-    }
-}
\ No newline at end of file
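
The filter above reduces serialization to two calls: marshal every outgoing HadoopMessage to a byte array, and unmarshal every incoming byte array with the system classloader (the null argument). A minimal round trip through the same Marshaller API; JdkMarshaller is chosen here purely for illustration:

    import org.apache.ignite.IgniteCheckedException;
    import org.apache.ignite.marshaller.Marshaller;
    import org.apache.ignite.marshaller.jdk.JdkMarshaller;

    public class MarshalRoundTripSketch {
        public static void main(String[] args) throws IgniteCheckedException {
            Marshaller marsh = new JdkMarshaller(); // Any Marshaller implementation works here.

            byte[] bytes = marsh.marshal("hello");  // What onSessionWrite() does to outgoing messages.

            String restored = marsh.unmarshal(bytes, null); // Null classloader = system classloader.

            System.out.println(restored); // -> hello
        }
    }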

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMessageListener.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMessageListener.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMessageListener.java
deleted file mode 100644
index 6d50f43..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMessageListener.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication;
-
-import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;
-
-/**
- * Hadoop communication message listener.
- */
-public interface HadoopMessageListener {
-    /**
-     * @param desc Process descriptor.
-     * @param msg Hadoop message.
-     */
-    public void onMessageReceived(HadoopProcessDescriptor desc, HadoopMessage msg);
-
-    /**
-     * Called when the connection to a remote process is lost.
-     *
-     * @param desc Process descriptor.
-     */
-    public void onConnectionLost(HadoopProcessDescriptor desc);
-}
\ No newline at end of file
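
An implementation of the interface above only has to handle the two callbacks; a minimal sketch that logs both events (the println logging is illustrative):

    import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
    import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;
    import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication.HadoopMessageListener;

    public class LoggingMessageListener implements HadoopMessageListener {
        /** {@inheritDoc} */
        @Override public void onMessageReceived(HadoopProcessDescriptor desc, HadoopMessage msg) {
            System.out.println("Message from process " + desc.processId() + ": " + msg);
        }

        /** {@inheritDoc} */
        @Override public void onConnectionLost(HadoopProcessDescriptor desc) {
            System.out.println("Connection lost to process: " + desc.processId());
        }
    }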


[33/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemClientSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemClientSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemClientSelfTest.java
new file mode 100644
index 0000000..8ddb359
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemClientSelfTest.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.FileSystemConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.IgniteKernal;
+import org.apache.ignite.internal.igfs.common.IgfsLogger;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfs;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsOutProc;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsOutputStream;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsStreamDelegate;
+import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
+import org.apache.ignite.internal.processors.igfs.IgfsContext;
+import org.apache.ignite.internal.processors.igfs.IgfsProcessorAdapter;
+import org.apache.ignite.internal.processors.igfs.IgfsServer;
+import org.apache.ignite.internal.processors.igfs.IgfsServerHandler;
+import org.apache.ignite.internal.processors.igfs.IgfsServerManager;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridTestUtils;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.cache.CacheMode.REPLICATED;
+import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
+import static org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT;
+
+/**
+ * Tests interaction between an IGFS client and an IGFS server.
+ */
+public class IgniteHadoopFileSystemClientSelfTest extends IgfsCommonAbstractTest {
+    /** Logger. */
+    private static final Log LOG = LogFactory.getLog(IgniteHadoopFileSystemClientSelfTest.class);
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        startGrids(1);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        G.stopAll(true);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+        discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
+
+        cfg.setDiscoverySpi(discoSpi);
+
+        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
+
+        igfsCfg.setDataCacheName("partitioned");
+        igfsCfg.setMetaCacheName("replicated");
+        igfsCfg.setName("igfs");
+        igfsCfg.setBlockSize(512 * 1024);
+
+        IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
+
+        endpointCfg.setType(IgfsIpcEndpointType.TCP);
+        endpointCfg.setPort(DFLT_IPC_PORT);
+
+        igfsCfg.setIpcEndpointConfiguration(endpointCfg);
+
+        cfg.setCacheConfiguration(cacheConfiguration());
+        cfg.setFileSystemConfiguration(igfsCfg);
+
+        return cfg;
+    }
+
+    /**
+     * Gets cache configuration.
+     *
+     * @return Cache configuration.
+     */
+    protected CacheConfiguration[] cacheConfiguration() {
+        CacheConfiguration cacheCfg = defaultCacheConfiguration();
+
+        cacheCfg.setName("partitioned");
+        cacheCfg.setCacheMode(PARTITIONED);
+        cacheCfg.setNearConfiguration(null);
+        cacheCfg.setWriteSynchronizationMode(FULL_SYNC);
+        cacheCfg.setEvictionPolicy(null);
+        cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(128));
+        cacheCfg.setBackups(0);
+        cacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
+
+        metaCacheCfg.setName("replicated");
+        metaCacheCfg.setCacheMode(REPLICATED);
+        metaCacheCfg.setWriteSynchronizationMode(FULL_SYNC);
+        metaCacheCfg.setEvictionPolicy(null);
+        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        return new CacheConfiguration[] {metaCacheCfg, cacheCfg};
+    }
+
+    /**
+     * Test output stream deferred exception (GG-4440).
+     *
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testOutputStreamDeferredException() throws Exception {
+        final byte[] data = "test".getBytes();
+
+        try {
+            switchHandlerErrorFlag(true);
+
+            HadoopIgfs client = new HadoopIgfsOutProc("127.0.0.1", 10500, getTestGridName(0), "igfs", LOG, null);
+
+            client.handshake(null);
+
+            IgfsPath path = new IgfsPath("/test1.file");
+
+            HadoopIgfsStreamDelegate delegate = client.create(path, true, false, 1, 1024, null);
+
+            final HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(delegate, LOG,
+                IgfsLogger.disabledLogger(), 0);
+
+            // This call should return fine, as the deferred exception has not surfaced yet.
+            igfsOut.write(data);
+
+            U.sleep(500);
+
+            // This call should throw an IO exception.
+            GridTestUtils.assertThrows(null, new Callable<Object>() {
+                @Override public Object call() throws Exception {
+                    igfsOut.write(data);
+
+                    return null;
+                }
+            }, IOException.class, "Failed to write data to server (test).");
+        }
+        finally {
+            switchHandlerErrorFlag(false);
+        }
+    }
+
+    /**
+     * Set IGFS REST handler error flag to the given state.
+     *
+     * @param flag Flag state.
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("ConstantConditions")
+    private void switchHandlerErrorFlag(boolean flag) throws Exception {
+        IgfsProcessorAdapter igfsProc = ((IgniteKernal)grid(0)).context().igfs();
+
+        Map<String, IgfsContext> igfsMap = getField(igfsProc, "igfsCache");
+
+        IgfsServerManager srvMgr = F.first(igfsMap.values()).server();
+
+        Collection<IgfsServer> srvrs = getField(srvMgr, "srvrs");
+
+        IgfsServerHandler igfsHnd = getField(F.first(srvrs), "hnd");
+
+        Field field = igfsHnd.getClass().getDeclaredField("errWrite");
+
+        field.setAccessible(true);
+
+        field.set(null, flag);
+    }
+
+    /**
+     * Gets the value of the named field of the given object.
+     *
+     * @param obj Object.
+     * @param fieldName Field name.
+     * @return Value of the field.
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("unchecked")
+    private <T> T getField(Object obj, String fieldName) throws Exception {
+        Field field = obj.getClass().getDeclaredField(fieldName);
+
+        field.setAccessible(true);
+
+        return (T)field.get(obj);
+    }
+}
\ No newline at end of file
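
switchHandlerErrorFlag() above flips a private static flag through reflection; the same pattern in isolation, with a hypothetical Holder class standing in for IgfsServerHandler:

    import java.lang.reflect.Field;

    public class ReflectionFlagSketch {
        static class Holder {
            private static boolean errWrite; // Hypothetical flag, mirroring the test.
        }

        public static void main(String[] args) throws Exception {
            Field field = Holder.class.getDeclaredField("errWrite");

            field.setAccessible(true);

            field.set(null, true); // Static field: pass null as the target object.

            System.out.println(field.get(null)); // -> true
        }
    }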

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemHandshakeSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemHandshakeSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemHandshakeSelfTest.java
new file mode 100644
index 0000000..fdb0d77
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemHandshakeSelfTest.java
@@ -0,0 +1,389 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.concurrent.Callable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteFileSystem;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.FileSystemConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem;
+import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridTestUtils;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.cache.CacheMode.REPLICATED;
+import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
+import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
+import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_EMBED;
+import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM;
+import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_LOCAL_TCP;
+import static org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT;
+
+/**
+ * Tests for IGFS file system handshake.
+ */
+public class IgniteHadoopFileSystemHandshakeSelfTest extends IgfsCommonAbstractTest {
+    /** IP finder. */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** Grid name. */
+    private static final String GRID_NAME = "grid";
+
+    /** IGFS name. */
+    private static final String IGFS_NAME = "igfs";
+
+    /** IGFS path. */
+    private static final IgfsPath PATH = new IgfsPath("/path");
+
+    /** A host-port pair used for URI in embedded mode. */
+    private static final String HOST_PORT_UNUSED = "somehost:65333";
+
+    /** Flag defining whether to use TCP or embedded connection mode. */
+    private boolean tcp = false;
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids(true);
+    }
+
+    /**
+     * Tests for Grid and IGFS having normal names.
+     *
+     * @throws Exception If failed.
+     */
+    public void testHandshake() throws Exception {
+        startUp(false, false);
+
+        tcp = true;
+
+        checkValid(IGFS_NAME + ":" + GRID_NAME + "@");
+        checkValid(IGFS_NAME + ":" + GRID_NAME + "@127.0.0.1");
+        checkValid(IGFS_NAME + ":" + GRID_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
+
+        checkValid(IGFS_NAME + "@");
+        checkValid(IGFS_NAME + "@127.0.0.1");
+        checkValid(IGFS_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
+
+        checkValid(":" + GRID_NAME + "@");
+        checkValid(":" + GRID_NAME + "@127.0.0.1");
+        checkValid(":" + GRID_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
+
+        checkValid("");
+        checkValid("127.0.0.1");
+        checkValid("127.0.0.1:" + DFLT_IPC_PORT);
+
+        tcp = false; // Embedded mode:
+
+        checkValid(IGFS_NAME + ":" + GRID_NAME + "@");
+        checkValid(IGFS_NAME + ":" + GRID_NAME + "@" + HOST_PORT_UNUSED);
+
+        checkValid(IGFS_NAME + "@"); // Embedded mode fails, but remote tcp succeeds.
+        checkInvalid(IGFS_NAME + "@" + HOST_PORT_UNUSED);
+
+        checkValid(":" + GRID_NAME + "@"); // Embedded mode fails, but remote tcp succeeds.
+        checkInvalid(":" + GRID_NAME + "@" + HOST_PORT_UNUSED);
+
+        checkValid("@"); // Embedded mode fails, but remote tcp succeeds.
+        checkInvalid("@" + HOST_PORT_UNUSED);
+    }
+
+    /**
+     * Tests for Grid having {@code null} name and IGFS having normal name.
+     *
+     * @throws Exception If failed.
+     */
+    public void testHandshakeDefaultGrid() throws Exception {
+        startUp(true, false);
+
+        tcp = true;
+
+        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@");
+        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@127.0.0.1");
+        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
+
+        checkValid(IGFS_NAME + "@");
+        checkValid(IGFS_NAME + "@127.0.0.1");
+        checkValid(IGFS_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
+
+        checkInvalid(":" + GRID_NAME + "@");
+        checkInvalid(":" + GRID_NAME + "@127.0.0.1");
+        checkInvalid(":" + GRID_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
+
+        checkValid("");
+        checkValid("127.0.0.1");
+        checkValid("127.0.0.1:" + DFLT_IPC_PORT);
+
+        tcp = false; // Embedded mode:
+
+        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@");
+        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@" + HOST_PORT_UNUSED);
+
+        checkValid(IGFS_NAME + "@");
+        checkValid(IGFS_NAME + "@" + HOST_PORT_UNUSED);
+
+        checkInvalid(":" + GRID_NAME + "@");
+        checkInvalid(":" + GRID_NAME + "@" + HOST_PORT_UNUSED);
+
+        checkValid("@"); // Embedded mode fails, but remote tcp succeeds.
+        checkInvalid("@" + HOST_PORT_UNUSED);
+    }
+
+    /**
+     * Tests for Grid having normal name and IGFS having {@code null} name.
+     *
+     * @throws Exception If failed.
+     */
+    public void testHandshakeDefaultIgfs() throws Exception {
+        startUp(false/*grid name*/, true/*default igfs*/);
+
+        tcp = true;
+
+        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@");
+        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@127.0.0.1");
+        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
+
+        checkInvalid(IGFS_NAME + "@");
+        checkInvalid(IGFS_NAME + "@127.0.0.1");
+        checkInvalid(IGFS_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
+
+        checkValid(":" + GRID_NAME + "@");
+        checkValid(":" + GRID_NAME + "@127.0.0.1");
+        checkValid(":" + GRID_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
+
+        checkValid("");
+        checkValid("127.0.0.1");
+        checkValid("127.0.0.1:" + DFLT_IPC_PORT);
+
+        tcp = false; // Embedded mode:
+
+        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@");
+        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@" + HOST_PORT_UNUSED);
+
+        checkInvalid(IGFS_NAME + "@");
+        checkInvalid(IGFS_NAME + "@" + HOST_PORT_UNUSED);
+
+        checkValid(":" + GRID_NAME + "@");
+        checkValid(":" + GRID_NAME + "@" + HOST_PORT_UNUSED);
+
+        checkValid("@"); // NB: in embedded mode this fails, but remote TCP still succeeds.
+        checkInvalid("@" + HOST_PORT_UNUSED);
+    }
+
+    /**
+     * Tests for Grid having {@code null} name and IGFS having {@code null} name.
+     *
+     * @throws Exception If failed.
+     */
+    public void testHandshakeDefaultGridDefaultIgfs() throws Exception {
+        startUp(true, true);
+
+        tcp = true;
+
+        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@");
+        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@127.0.0.1");
+        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
+
+        checkInvalid(IGFS_NAME + "@");
+        checkInvalid(IGFS_NAME + "@127.0.0.1");
+        checkInvalid(IGFS_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
+
+        checkInvalid(":" + GRID_NAME + "@");
+        checkInvalid(":" + GRID_NAME + "@127.0.0.1");
+        checkInvalid(":" + GRID_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
+
+        checkValid("");
+        checkValid("127.0.0.1");
+        checkValid("127.0.0.1:" + DFLT_IPC_PORT);
+
+        tcp = false; // Embedded mode:
+
+        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@");
+        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@" + HOST_PORT_UNUSED);
+
+        checkInvalid(IGFS_NAME + "@");
+        checkInvalid(IGFS_NAME + "@" + HOST_PORT_UNUSED);
+
+        checkInvalid(":" + GRID_NAME + "@");
+        checkInvalid(":" + GRID_NAME + "@" + HOST_PORT_UNUSED);
+
+        checkValid("@");
+        checkValid("@" + HOST_PORT_UNUSED);
+    }
+
+    /**
+     * Perform startup.
+     *
+     * @param dfltGridName Default Grid name.
+     * @param dfltIgfsName Default IGFS name.
+     * @throws Exception If failed.
+     */
+    private void startUp(boolean dfltGridName, boolean dfltIgfsName) throws Exception {
+        Ignite ignite = G.start(gridConfiguration(dfltGridName, dfltIgfsName));
+
+        IgniteFileSystem igfs = ignite.fileSystem(dfltIgfsName ? null : IGFS_NAME);
+
+        igfs.mkdirs(PATH);
+    }
+
+    /**
+     * Create Grid configuration.
+     *
+     * @param dfltGridName Default Grid name.
+     * @param dfltIgfsName Default IGFS name.
+     * @return Grid configuration.
+     * @throws Exception If failed.
+     */
+    private IgniteConfiguration gridConfiguration(boolean dfltGridName, boolean dfltIgfsName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(dfltGridName ? null : GRID_NAME);
+
+        cfg.setLocalHost("127.0.0.1");
+        cfg.setConnectorConfiguration(null);
+
+        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+
+        discoSpi.setIpFinder(IP_FINDER);
+
+        cfg.setDiscoverySpi(discoSpi);
+
+        TcpCommunicationSpi commSpi = new TcpCommunicationSpi();
+
+        commSpi.setSharedMemoryPort(-1);
+
+        cfg.setCommunicationSpi(commSpi);
+
+        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
+
+        metaCacheCfg.setName("replicated");
+        metaCacheCfg.setCacheMode(REPLICATED);
+        metaCacheCfg.setWriteSynchronizationMode(FULL_SYNC);
+        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        CacheConfiguration dataCacheCfg = defaultCacheConfiguration();
+
+        dataCacheCfg.setName("partitioned");
+        dataCacheCfg.setCacheMode(PARTITIONED);
+        dataCacheCfg.setNearConfiguration(null);
+        dataCacheCfg.setWriteSynchronizationMode(FULL_SYNC);
+        dataCacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(128));
+        dataCacheCfg.setBackups(0);
+        dataCacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        cfg.setCacheConfiguration(metaCacheCfg, dataCacheCfg);
+
+        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
+
+        igfsCfg.setDataCacheName("partitioned");
+        igfsCfg.setMetaCacheName("replicated");
+        igfsCfg.setName(dfltIgfsName ? null : IGFS_NAME);
+        igfsCfg.setPrefetchBlocks(1);
+        igfsCfg.setDefaultMode(PRIMARY);
+
+        IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
+
+        endpointCfg.setType(IgfsIpcEndpointType.TCP);
+        endpointCfg.setPort(DFLT_IPC_PORT);
+
+        igfsCfg.setIpcEndpointConfiguration(endpointCfg);
+
+        igfsCfg.setManagementPort(-1);
+        igfsCfg.setBlockSize(512 * 1024);
+
+        cfg.setFileSystemConfiguration(igfsCfg);
+
+        return cfg;
+    }
+
+    /**
+     * Check valid file system endpoint.
+     *
+     * @param authority Authority.
+     * @throws Exception If failed.
+     */
+    private void checkValid(String authority) throws Exception {
+        FileSystem fs = fileSystem(authority, tcp);
+
+        assert fs.exists(new Path(PATH.toString()));
+    }
+
+    /**
+     * Check invalid file system endpoint.
+     *
+     * @param authority Authority.
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    private void checkInvalid(final String authority) throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                fileSystem(authority, tcp);
+
+                return null;
+            }
+        }, IOException.class, null);
+    }
+
+    /**
+     * Gets the file system using authority and tcp flag.
+     *
+     * @param authority Authority.
+     * @param tcp TCP flag.
+     * @return File system.
+     * @throws Exception If failed.
+     */
+    private static FileSystem fileSystem(String authority, boolean tcp) throws Exception {
+        return FileSystem.get(new URI("igfs://" + authority + "/"), configuration(authority, tcp));
+    }
+
+    /**
+     * Create configuration for test.
+     *
+     * @param authority Authority.
+     * @param tcp TCP flag.
+     * @return Configuration.
+     */
+    private static Configuration configuration(String authority, boolean tcp) {
+        Configuration cfg = new Configuration();
+
+        cfg.set("fs.defaultFS", "igfs://" + authority + "/");
+        cfg.set("fs.igfs.impl", org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.class.getName());
+        cfg.set("fs.AbstractFileSystem.igfs.impl",
+            IgniteHadoopFileSystem.class.getName());
+
+        cfg.setBoolean("fs.igfs.impl.disable.cache", true);
+
+        if (tcp)
+            cfg.setBoolean(String.format(PARAM_IGFS_ENDPOINT_NO_EMBED, authority), true);
+        else
+            cfg.setBoolean(String.format(PARAM_IGFS_ENDPOINT_NO_LOCAL_TCP, authority), true);
+
+        cfg.setBoolean(String.format(PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM, authority), true);
+
+        return cfg;
+    }
+}
\ No newline at end of file
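
The checks above exercise the general IGFS URI authority shape,
"igfs://[igfsName[:gridName]@][host[:port]]/", distinguishing valid and
invalid combinations for TCP and embedded endpoints. A minimal sketch of
how such an authority decomposes with the stock java.net.URI parser (the
names and port below are illustrative assumptions, not values from the
patch):

    import java.net.URI;

    public class IgfsAuthoritySketch {
        public static void main(String[] args) throws Exception {
            URI uri = new URI("igfs://igfs:grid@127.0.0.1:10500/");

            // User-info carries the IGFS and grid names; host/port the endpoint.
            System.out.println(uri.getUserInfo()); // "igfs:grid"
            System.out.println(uri.getHost());     // "127.0.0.1"
            System.out.println(uri.getPort());     // 10500
        }
    }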

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemIpcCacheSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemIpcCacheSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemIpcCacheSelfTest.java
new file mode 100644
index 0000000..4d7a39e
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemIpcCacheSelfTest.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import java.lang.reflect.Field;
+import java.net.URI;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.ignite.cache.CacheWriteSynchronizationMode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.FileSystemConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsIpcIo;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils;
+import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
+import org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.cache.CacheMode.REPLICATED;
+import static org.apache.ignite.events.EventType.EVT_JOB_MAPPED;
+import static org.apache.ignite.events.EventType.EVT_TASK_FAILED;
+import static org.apache.ignite.events.EventType.EVT_TASK_FINISHED;
+
+/**
+ * IPC cache test.
+ */
+public class IgniteHadoopFileSystemIpcCacheSelfTest extends IgfsCommonAbstractTest {
+    /** IP finder. */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** Path to the test Hadoop configuration. */
+    private static final String HADOOP_FS_CFG = "modules/core/src/test/config/hadoop/core-site.xml";
+
+    /** Group size. */
+    public static final int GRP_SIZE = 128;
+
+    /** Started grid counter. */
+    private static int cnt;
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+        discoSpi.setIpFinder(IP_FINDER);
+
+        cfg.setDiscoverySpi(discoSpi);
+
+        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
+
+        igfsCfg.setDataCacheName("partitioned");
+        igfsCfg.setMetaCacheName("replicated");
+        igfsCfg.setName("igfs");
+        igfsCfg.setManagementPort(FileSystemConfiguration.DFLT_MGMT_PORT + cnt);
+
+        IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
+
+        endpointCfg.setType(IgfsIpcEndpointType.SHMEM);
+        endpointCfg.setPort(IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + cnt);
+
+        igfsCfg.setIpcEndpointConfiguration(endpointCfg);
+
+        igfsCfg.setBlockSize(512 * 1024); // Together with the group blocks mapper this yields 64M per-node groups.
+
+        cfg.setFileSystemConfiguration(igfsCfg);
+
+        cfg.setCacheConfiguration(cacheConfiguration());
+
+        cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED);
+
+        TcpCommunicationSpi commSpi = new TcpCommunicationSpi();
+
+        commSpi.setSharedMemoryPort(-1);
+
+        cfg.setCommunicationSpi(commSpi);
+
+        cnt++;
+
+        return cfg;
+    }
+
+    /**
+     * Gets cache configuration.
+     *
+     * @return Cache configuration.
+     */
+    private CacheConfiguration[] cacheConfiguration() {
+        CacheConfiguration cacheCfg = defaultCacheConfiguration();
+
+        cacheCfg.setName("partitioned");
+        cacheCfg.setCacheMode(PARTITIONED);
+        cacheCfg.setNearConfiguration(null);
+        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(GRP_SIZE));
+        cacheCfg.setBackups(0);
+        cacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
+
+        metaCacheCfg.setName("replicated");
+        metaCacheCfg.setCacheMode(REPLICATED);
+        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        return new CacheConfiguration[] {metaCacheCfg, cacheCfg};
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        startGrids(4);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        G.stopAll(true);
+    }
+
+    /**
+     * Test how IPC cache map works.
+     *
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("unchecked")
+    public void testIpcCache() throws Exception {
+        Field cacheField = HadoopIgfsIpcIo.class.getDeclaredField("ipcCache");
+
+        cacheField.setAccessible(true);
+
+        Field activeCntField = HadoopIgfsIpcIo.class.getDeclaredField("activeCnt");
+
+        activeCntField.setAccessible(true);
+
+        Map<String, HadoopIgfsIpcIo> cache = (Map<String, HadoopIgfsIpcIo>)cacheField.get(null);
+
+        cache.clear(); // Avoid influence of previous tests in the same process.
+
+        String name = "igfs:" + getTestGridName(0) + "@";
+
+        Configuration cfg = new Configuration();
+
+        cfg.addResource(U.resolveIgniteUrl(HADOOP_FS_CFG));
+        cfg.setBoolean("fs.igfs.impl.disable.cache", true);
+        cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_EMBED, name), true);
+
+        // Ensure that existing IO is reused.
+        FileSystem fs1 = FileSystem.get(new URI("igfs://" + name + "/"), cfg);
+
+        assertEquals(1, cache.size());
+
+        HadoopIgfsIpcIo io = null;
+
+        System.out.println("CACHE: " + cache);
+
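+        // Grid 0 binds its IPC endpoint to the default port 10500 (DFLT_IPC_PORT + 0).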
+        for (String key : cache.keySet()) {
+            if (key.contains("10500")) {
+                io = cache.get(key);
+
+                break;
+            }
+        }
+
+        assert io != null;
+
+        assertEquals(1, ((AtomicInteger)activeCntField.get(io)).get());
+
+        // Ensure that when IO is used by multiple file systems and one of them is closed, IO is not stopped.
+        FileSystem fs2 = FileSystem.get(new URI("igfs://" + name + "/abc"), cfg);
+
+        assertEquals(1, cache.size());
+        assertEquals(2, ((AtomicInteger)activeCntField.get(io)).get());
+
+        fs2.close();
+
+        assertEquals(1, cache.size());
+        assertEquals(1, ((AtomicInteger)activeCntField.get(io)).get());
+
+        Field stopField = HadoopIgfsIpcIo.class.getDeclaredField("stopping");
+
+        stopField.setAccessible(true);
+
+        assert !(Boolean)stopField.get(io);
+
+        // Ensure that IO is stopped when nobody else needs it.
+        fs1.close();
+
+        assert cache.isEmpty();
+
+        assert (Boolean)stopField.get(io);
+    }
+}
\ No newline at end of file
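
The test above relies on three pieces of HadoopIgfsIpcIo state: a static
per-endpoint cache, an active-user counter, and a stopping flag. A minimal
standalone sketch of that acquire/release contract (a simplified model
under stated assumptions, not Ignite's actual implementation):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicInteger;

    public class SharedIpcIoSketch {
        static final ConcurrentHashMap<String, SharedIpcIoSketch> CACHE = new ConcurrentHashMap<>();

        final String endpoint;
        final AtomicInteger activeCnt = new AtomicInteger();
        volatile boolean stopping;

        SharedIpcIoSketch(String endpoint) { this.endpoint = endpoint; }

        /** Get (or create) the shared IO for an endpoint and bump its user count. */
        static SharedIpcIoSketch acquire(String endpoint) {
            SharedIpcIoSketch io = CACHE.computeIfAbsent(endpoint, SharedIpcIoSketch::new);
            io.activeCnt.incrementAndGet();
            return io;
        }

        /** Drop one user; the last release stops the IO and evicts it. */
        void release() {
            if (activeCnt.decrementAndGet() == 0) {
                stopping = true;
                CACHE.remove(endpoint, this);
            }
        }

        public static void main(String[] args) {
            SharedIpcIoSketch io1 = acquire("127.0.0.1:10500");
            SharedIpcIoSketch io2 = acquire("127.0.0.1:10500");
            assert io1 == io2 && CACHE.size() == 1; // Same endpoint -> same IO.

            io2.release(); // One user remains, so the IO keeps running.
            assert !io1.stopping;

            io1.release(); // Last user gone: IO stops and leaves the cache.
            assert io1.stopping && CACHE.isEmpty();
        }
    }

(Run with -ea to enable the assertions.)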

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerSelfTest.java
new file mode 100644
index 0000000..3013311
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerSelfTest.java
@@ -0,0 +1,298 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import org.apache.ignite.internal.igfs.common.IgfsLogger;
+import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
+import org.apache.ignite.internal.util.typedef.internal.SB;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FilenameFilter;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
+import static org.apache.ignite.internal.igfs.common.IgfsLogger.DELIM_FIELD;
+import static org.apache.ignite.internal.igfs.common.IgfsLogger.DELIM_FIELD_VAL;
+import static org.apache.ignite.internal.igfs.common.IgfsLogger.HDR;
+import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_CLOSE_IN;
+import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_CLOSE_OUT;
+import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_DELETE;
+import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_DIR_LIST;
+import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_DIR_MAKE;
+import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_MARK;
+import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_OPEN_IN;
+import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_OPEN_OUT;
+import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_RANDOM_READ;
+import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_RENAME;
+import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_RESET;
+import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_SEEK;
+import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_SKIP;
+
+/**
+ * Grid IGFS client logger test.
+ */
+public class IgniteHadoopFileSystemLoggerSelfTest extends IgfsCommonAbstractTest {
+    /** Path string. */
+    private static final String PATH_STR = "/dir1/dir2/file;test";
+
+    /** Path string with escaped semicolons. */
+    private static final String PATH_STR_ESCAPED = PATH_STR.replace(';', '~');
+
+    /** Path. */
+    private static final IgfsPath PATH = new IgfsPath(PATH_STR);
+
+    /** IGFS name. */
+    private static final String IGFS_NAME = "igfs";
+
+    /** Log directory. */
+    private static final String LOG_DIR = U.getIgniteHome();
+
+    /** Endpoint address. */
+    private static final String ENDPOINT = "localhost:10500";
+
+    /** Log file name. */
+    private static final String LOG_FILE = LOG_DIR + File.separator + "igfs-log-" + IGFS_NAME + "-" + U.jvmPid() +
+        ".csv";
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        removeLogs();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        removeLogs();
+    }
+
+    /**
+     * Remove existing logs.
+     *
+     * @throws Exception If failed.
+     */
+    private void removeLogs() throws Exception {
+        File dir = new File(LOG_DIR);
+
+        File[] logs = dir.listFiles(new FilenameFilter() {
+            @Override public boolean accept(File dir, String name) {
+                return name.startsWith("igfs-log-");
+            }
+        });
+
+        for (File log : logs)
+            log.delete();
+    }
+
+    /**
+     * Ensure correct creation/removal of static loggers as well as log file creation.
+     *
+     * @throws Exception If failed.
+     */
+    public void testCreateDelete() throws Exception {
+        IgfsLogger log = IgfsLogger.logger(ENDPOINT, IGFS_NAME, LOG_DIR, 10);
+
+        IgfsLogger sameLog0 = IgfsLogger.logger(ENDPOINT, IGFS_NAME, LOG_DIR, 10);
+
+        // Loggers for the same endpoint must be the same object.
+        assert log == sameLog0;
+
+        IgfsLogger otherLog = IgfsLogger.logger("other" + ENDPOINT, IGFS_NAME, LOG_DIR, 10);
+
+        // Logger for another endpoint must be different.
+        assert log != otherLog;
+
+        otherLog.close();
+
+        log.logDelete(PATH, PRIMARY, false);
+
+        log.close();
+
+        File logFile = new File(LOG_FILE);
+
+        // When there are multiple loggers, closing one must not force flushing.
+        assert !logFile.exists();
+
+        IgfsLogger sameLog1 = IgfsLogger.logger(ENDPOINT, IGFS_NAME, LOG_DIR, 10);
+
+        assert sameLog0 == sameLog1;
+
+        sameLog0.close();
+
+        assert !logFile.exists();
+
+        sameLog1.close();
+
+        // When we close the last logger, it must flush data to disk.
+        assert logFile.exists();
+
+        logFile.delete();
+
+        IgfsLogger sameLog2 = IgfsLogger.logger(ENDPOINT, IGFS_NAME, LOG_DIR, 10);
+
+        // This time we expect a new logger instance to be created.
+        assert sameLog0 != sameLog2;
+
+        sameLog2.close();
+
+        // As we do not add any records to the logger, we do not expect flushing.
+        assert !logFile.exists();
+    }
+
+    /**
+     * Test read operations logging.
+     *
+     * @throws Exception If failed.
+     */
+    public void testLogRead() throws Exception {
+        IgfsLogger log = IgfsLogger.logger(ENDPOINT, IGFS_NAME, LOG_DIR, 10);
+
+        log.logOpen(1, PATH, PRIMARY, 2, 3L);
+        log.logRandomRead(1, 4L, 5);
+        log.logSeek(1, 6L);
+        log.logSkip(1, 7L);
+        log.logMark(1, 8L);
+        log.logReset(1);
+        log.logCloseIn(1, 9L, 10L, 11);
+
+        log.close();
+
+        checkLog(
+            new SB().a(U.jvmPid() + d() + TYPE_OPEN_IN + d() + PATH_STR_ESCAPED + d() + PRIMARY + d() + 1 + d() + 2 +
+                d() + 3 + d(14)).toString(),
+            new SB().a(U.jvmPid() + d() + TYPE_RANDOM_READ + d(3) + 1 + d(7) + 4 + d() + 5 + d(8)).toString(),
+            new SB().a(U.jvmPid() + d() + TYPE_SEEK + d(3) + 1 + d(7) + 6 + d(9)).toString(),
+            new SB().a(U.jvmPid() + d() + TYPE_SKIP + d(3) + 1 + d(9) + 7 + d(7)).toString(),
+            new SB().a(U.jvmPid() + d() + TYPE_MARK + d(3) + 1 + d(10) + 8 + d(6)).toString(),
+            new SB().a(U.jvmPid() + d() + TYPE_RESET + d(3) + 1 + d(16)).toString(),
+            new SB().a(U.jvmPid() + d() + TYPE_CLOSE_IN + d(3) + 1 + d(11) + 9 + d() + 10 + d() + 11 + d(3)).toString()
+        );
+    }
+
+    /**
+     * Test write operations logging.
+     *
+     * @throws Exception If failed.
+     */
+    public void testLogWrite() throws Exception {
+        IgfsLogger log = IgfsLogger.logger(ENDPOINT, IGFS_NAME, LOG_DIR, 10);
+
+        log.logCreate(1, PATH, PRIMARY, true, 2, (short)3, 4L);
+        log.logAppend(2, PATH, PRIMARY, 8);
+        log.logCloseOut(2, 9L, 10L, 11);
+
+        log.close();
+
+        checkLog(
+            new SB().a(U.jvmPid() + d() + TYPE_OPEN_OUT + d() + PATH_STR_ESCAPED + d() + PRIMARY + d() + 1 + d() +
+                2 + d(2) + 0 + d() + 1 + d() + 3 + d() + 4 + d(10)).toString(),
+            new SB().a(U.jvmPid() + d() + TYPE_OPEN_OUT + d() + PATH_STR_ESCAPED + d() + PRIMARY + d() + 2 + d() +
+                8 + d(2) + 1 + d(13)).toString(),
+            new SB().a(U.jvmPid() + d() + TYPE_CLOSE_OUT + d(3) + 2 + d(11) + 9 + d() + 10 + d() + 11 + d(3))
+                .toString()
+        );
+    }
+
+    /**
+     * Test miscellaneous operations logging.
+     *
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("TooBroadScope")
+    public void testLogMisc() throws Exception {
+        IgfsLogger log = IgfsLogger.logger(ENDPOINT, IGFS_NAME, LOG_DIR, 10);
+
+        String newFile = "/dir3/file.test";
+        String file1 = "/dir3/file1.test";
+        String file2 = "/dir3/file1.test";
+
+        log.logMakeDirectory(PATH, PRIMARY);
+        log.logRename(PATH, PRIMARY, new IgfsPath(newFile));
+        log.logListDirectory(PATH, PRIMARY, new String[] { file1, file2 });
+        log.logDelete(PATH, PRIMARY, false);
+
+        log.close();
+
+        checkLog(
+            new SB().a(U.jvmPid() + d() + TYPE_DIR_MAKE + d() + PATH_STR_ESCAPED + d() + PRIMARY + d(17)).toString(),
+            new SB().a(U.jvmPid() + d() + TYPE_RENAME + d() + PATH_STR_ESCAPED + d() + PRIMARY + d(15) + newFile +
+                d(2)).toString(),
+            new SB().a(U.jvmPid() + d() + TYPE_DIR_LIST + d() + PATH_STR_ESCAPED + d() + PRIMARY + d(17) + file1 +
+                DELIM_FIELD_VAL + file2).toString(),
+            new SB().a(U.jvmPid() + d() + TYPE_DELETE + d(1) + PATH_STR_ESCAPED + d() + PRIMARY + d(16) + 0 +
+                d()).toString()
+        );
+    }
+
+    /**
+     * Ensure that the log file contains only the header and the expected lines.
+     *
+     * @param lines Expected lines.
+     * @throws Exception If failed.
+     */
+    private void checkLog(String... lines) throws Exception {
+        BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(LOG_FILE)));
+
+        List<String> logLines = new ArrayList<>(lines.length);
+
+        String nextLogLine;
+
+        while ((nextLogLine = br.readLine()) != null)
+            logLines.add(nextLogLine);
+
+        U.closeQuiet(br);
+
+        assertEquals(lines.length + 1, logLines.size());
+
+        assertEquals(logLines.get(0), HDR);
+
+        for (int i = 0; i < lines.length; i++) {
+            String logLine = logLines.get(i + 1);
+
+            logLine = logLine.substring(logLine.indexOf(DELIM_FIELD, logLine.indexOf(DELIM_FIELD) + 1) + 1);
+
+            assertEquals(lines[i], logLine);
+        }
+    }
+
+    /**
+     * Return single field delimiter.
+     *
+     * @return Single field delimiter.
+     */
+    private String d() {
+        return d(1);
+    }
+
+    /**
+     * Return a string of consecutive field delimiters.
+     *
+     * @param cnt Number of field delimiters.
+     * @return Field delimiters.
+     */
+    private String d(int cnt) {
+        SB buf = new SB();
+
+        for (int i = 0; i < cnt; i++)
+            buf.a(DELIM_FIELD);
+
+        return buf.toString();
+    }
+}
\ No newline at end of file
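
The expected strings in the tests above are assembled from a process id, a
record type, the populated fields, and runs of empty delimited columns
produced by d(int). A minimal sketch of that record layout (DELIM is an
assumption standing in for IgfsLogger.DELIM_FIELD; the real header and
column set live in IgfsLogger):

    public class IgfsLogLineSketch {
        static final char DELIM = ';';

        /** Like d(int) above: a run of {@code cnt} field delimiters. */
        static String d(int cnt) {
            StringBuilder sb = new StringBuilder();
            for (int i = 0; i < cnt; i++)
                sb.append(DELIM);
            return sb.toString();
        }

        public static void main(String[] args) {
            // A delete record: pid, type, escaped path, mode, padding, flag.
            String line = 12345 + d(1) + "DELETE" + d(1) + "/dir1/dir2/file~test"
                + d(1) + "PRIMARY" + d(16) + 0 + d(1);

            System.out.println(line);

            // Unpopulated columns appear as empty strings between delimiters.
            System.out.println(line.split(String.valueOf(DELIM), -1).length + " columns");
        }
    }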

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerStateSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerStateSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerStateSelfTest.java
new file mode 100644
index 0000000..1bd5b41
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerStateSelfTest.java
@@ -0,0 +1,329 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import java.lang.reflect.Field;
+import java.net.URI;
+import java.nio.file.Paths;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.cache.CacheWriteSynchronizationMode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.FileSystemConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
+import org.apache.ignite.internal.igfs.common.IgfsLogger;
+import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
+import org.apache.ignite.internal.processors.igfs.IgfsEx;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.cache.CacheMode.REPLICATED;
+import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_LOG_DIR;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_LOG_ENABLED;
+
+/**
+ * Ensures that sampling is really turned on/off.
+ */
+public class IgniteHadoopFileSystemLoggerStateSelfTest extends IgfsCommonAbstractTest {
+    /** IGFS. */
+    private IgfsEx igfs;
+
+    /** File system. */
+    private FileSystem fs;
+
+    /** Whether logging is enabled in FS configuration. */
+    private boolean logging;
+
+    /** Whether sampling is enabled. */
+    private Boolean sampling;
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        U.closeQuiet(fs);
+
+        igfs = null;
+        fs = null;
+
+        G.stopAll(true);
+
+        logging = false;
+        sampling = null;
+    }
+
+    /**
+     * Start up the grid and instantiate the file system.
+     *
+     * @throws Exception If failed.
+     */
+    private void startUp() throws Exception {
+        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
+
+        igfsCfg.setDataCacheName("partitioned");
+        igfsCfg.setMetaCacheName("replicated");
+        igfsCfg.setName("igfs");
+        igfsCfg.setBlockSize(512 * 1024);
+        igfsCfg.setDefaultMode(PRIMARY);
+
+        IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
+
+        endpointCfg.setType(IgfsIpcEndpointType.TCP);
+        endpointCfg.setPort(10500);
+
+        igfsCfg.setIpcEndpointConfiguration(endpointCfg);
+
+        CacheConfiguration cacheCfg = defaultCacheConfiguration();
+
+        cacheCfg.setName("partitioned");
+        cacheCfg.setCacheMode(PARTITIONED);
+        cacheCfg.setNearConfiguration(null);
+        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(128));
+        cacheCfg.setBackups(0);
+        cacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
+
+        metaCacheCfg.setName("replicated");
+        metaCacheCfg.setCacheMode(REPLICATED);
+        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        IgniteConfiguration cfg = new IgniteConfiguration();
+
+        cfg.setGridName("igfs-grid");
+
+        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+
+        discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
+
+        cfg.setDiscoverySpi(discoSpi);
+        cfg.setCacheConfiguration(metaCacheCfg, cacheCfg);
+        cfg.setFileSystemConfiguration(igfsCfg);
+
+        cfg.setLocalHost("127.0.0.1");
+        cfg.setConnectorConfiguration(null);
+
+        Ignite g = G.start(cfg);
+
+        igfs = (IgfsEx)g.fileSystem("igfs");
+
+        igfs.globalSampling(sampling);
+
+        fs = fileSystem();
+    }
+
+    /**
+     * When logging is disabled and sampling is not set, the no-op logger must be used.
+     *
+     * @throws Exception If failed.
+     */
+    public void testLoggingDisabledSamplingNotSet() throws Exception {
+        startUp();
+
+        assert !logEnabled();
+    }
+
+    /**
+     * When logging is enabled and sampling is not set, the file logger must be used.
+     *
+     * @throws Exception If failed.
+     */
+    public void testLoggingEnabledSamplingNotSet() throws Exception {
+        logging = true;
+
+        startUp();
+
+        assert logEnabled();
+    }
+
+    /**
+     * When logging is disabled and sampling is disabled, the no-op logger must be used.
+     *
+     * @throws Exception If failed.
+     */
+    public void testLoggingDisabledSamplingDisabled() throws Exception {
+        sampling = false;
+
+        startUp();
+
+        assert !logEnabled();
+    }
+
+    /**
+     * When logging is enabled and sampling is disabled, the no-op logger must be used.
+     *
+     * @throws Exception If failed.
+     */
+    public void testLoggingEnabledSamplingDisabled() throws Exception {
+        logging = true;
+        sampling = false;
+
+        startUp();
+
+        assert !logEnabled();
+    }
+
+    /**
+     * When logging is disabled and sampling is enabled, the file logger must be used.
+     *
+     * @throws Exception If failed.
+     */
+    public void testLoggingDisabledSamplingEnabled() throws Exception {
+        sampling = true;
+
+        startUp();
+
+        assert logEnabled();
+    }
+
+    /**
+     * When logging is enabled and sampling is enabled, the file logger must be used.
+     *
+     * @throws Exception If failed.
+     */
+    public void testLoggingEnabledSamplingEnabled() throws Exception {
+        logging = true;
+        sampling = true;
+
+        startUp();
+
+        assert logEnabled();
+    }
+
+    /**
+     * Ensure that a sampling change through the API affects logging on subsequent client connections.
+     *
+     * @throws Exception If failed.
+     */
+    public void testSamplingChange() throws Exception {
+        // Start with sampling not set.
+        startUp();
+
+        assert !logEnabled();
+
+        fs.close();
+
+        // "Not set" => true transition.
+        igfs.globalSampling(true);
+
+        fs = fileSystem();
+
+        assert logEnabled();
+
+        fs.close();
+
+        // True => "not set" transition.
+        igfs.globalSampling(null);
+
+        fs = fileSystem();
+
+        assert !logEnabled();
+
+        // "Not-set" => false transition.
+        igfs.globalSampling(false);
+
+        fs = fileSystem();
+
+        assert !logEnabled();
+
+        fs.close();
+
+        // False => "not=set" transition.
+        igfs.globalSampling(null);
+
+        fs = fileSystem();
+
+        assert !logEnabled();
+
+        fs.close();
+
+        // True => false transition.
+        igfs.globalSampling(true);
+        igfs.globalSampling(false);
+
+        fs = fileSystem();
+
+        assert !logEnabled();
+
+        fs.close();
+
+        // False => true transition.
+        igfs.globalSampling(true);
+
+        fs = fileSystem();
+
+        assert logEnabled();
+    }
+
+    /**
+     * Ensure that the log directory is propagated to IGFS when a client file system connects.
+     *
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("ConstantConditions")
+    public void testLogDirectory() throws Exception {
+        startUp();
+
+        assertEquals(Paths.get(U.getIgniteHome()).normalize().toString(),
+            igfs.clientLogDirectory());
+    }
+
+    /**
+     * Instantiate new file system.
+     *
+     * @return New file system.
+     * @throws Exception If failed.
+     */
+    private IgniteHadoopFileSystem fileSystem() throws Exception {
+        Configuration fsCfg = new Configuration();
+
+        fsCfg.addResource(U.resolveIgniteUrl("modules/core/src/test/config/hadoop/core-site-loopback.xml"));
+
+        fsCfg.setBoolean("fs.igfs.impl.disable.cache", true);
+
+        if (logging)
+            fsCfg.setBoolean(String.format(PARAM_IGFS_LOG_ENABLED, "igfs:igfs-grid@"), logging);
+
+        fsCfg.setStrings(String.format(PARAM_IGFS_LOG_DIR, "igfs:igfs-grid@"), U.getIgniteHome());
+
+        return (IgniteHadoopFileSystem)FileSystem.get(new URI("igfs://igfs:igfs-grid@/"), fsCfg);
+    }
+
+    /**
+     * Check whether a real logger is used by the file system.
+     *
+     * @return {@code True} if logging is enabled.
+     * @throws Exception If failed.
+     */
+    private boolean logEnabled() throws Exception {
+        assert fs != null;
+
+        Field field = fs.getClass().getDeclaredField("clientLog");
+
+        field.setAccessible(true);
+
+        return ((IgfsLogger)field.get(fs)).isLogEnabled();
+    }
+}
\ No newline at end of file
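
The six boolean combinations above reduce to one rule: an explicit
server-side sampling flag wins, and only when sampling is unset does the
client's logging setting decide. A minimal sketch of that resolution (a
simplified model, not the actual IgniteHadoopFileSystem logic):

    public class SamplingResolutionSketch {
        /** @param sampling Server flag: TRUE, FALSE, or null when not set. */
        static boolean logEnabled(Boolean sampling, boolean clientLogging) {
            return sampling != null ? sampling : clientLogging;
        }

        public static void main(String[] args) {
            assert !logEnabled(null, false); // Disabled, not set -> no-op logger.
            assert logEnabled(null, true);   // Enabled, not set -> file logger.
            assert !logEnabled(false, true); // Sampling off overrides the client.
            assert logEnabled(true, false);  // Sampling on overrides the client.
        }
    }

(Run with -ea to enable the assertions.)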

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackAbstractSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackAbstractSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackAbstractSelfTest.java
new file mode 100644
index 0000000..6ed2249
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackAbstractSelfTest.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT;
+
+/**
+ * IGFS Hadoop file system IPC loopback self test.
+ */
+public abstract class IgniteHadoopFileSystemLoopbackAbstractSelfTest extends
+    IgniteHadoopFileSystemAbstractSelfTest {
+    /**
+     * Constructor.
+     *
+     * @param mode IGFS mode.
+     * @param skipEmbed Skip embedded mode flag.
+     */
+    protected IgniteHadoopFileSystemLoopbackAbstractSelfTest(IgfsMode mode, boolean skipEmbed) {
+        super(mode, skipEmbed, true);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgfsIpcEndpointConfiguration primaryIpcEndpointConfiguration(final String gridName) {
+        IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
+
+        endpointCfg.setType(IgfsIpcEndpointType.TCP);
+        endpointCfg.setPort(DFLT_IPC_PORT + getTestGridIndex(gridName));
+
+        return endpointCfg;
+    }
+}
\ No newline at end of file
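
Each grid in these tests gets its own loopback endpoint by offsetting the
default IPC port with the grid index, so several server sockets can coexist
in one test JVM. A tiny sketch of that layout (loopbackPort is a
hypothetical helper mirroring the expression above; 10500 is
IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT, as seen in the IPC cache test):

    public class LoopbackPortSketch {
        /** Hypothetical helper: grid i listens on DFLT_IPC_PORT + i. */
        static int loopbackPort(int gridIdx) {
            return 10500 + gridIdx;
        }

        public static void main(String[] args) {
            for (int i = 0; i < 4; i++)
                System.out.println("grid " + i + " -> port " + loopbackPort(i));
        }
    }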

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest.java
new file mode 100644
index 0000000..f1edb28
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.igfs.IgfsMode.DUAL_ASYNC;
+
+/**
+ * IGFS Hadoop file system IPC loopback self test in DUAL_ASYNC mode.
+ */
+public class IgniteHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest extends
+    IgniteHadoopFileSystemLoopbackAbstractSelfTest {
+    /**
+     * Constructor.
+     */
+    public IgniteHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest() {
+        super(DUAL_ASYNC, false);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest.java
new file mode 100644
index 0000000..97a6991
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.igfs.IgfsMode.DUAL_SYNC;
+
+/**
+ * IGFS Hadoop file system IPC loopback self test in DUAL_SYNC mode.
+ */
+public class IgniteHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest
+    extends IgniteHadoopFileSystemLoopbackAbstractSelfTest {
+    /**
+     * Constructor.
+     */
+    public IgniteHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest() {
+        super(DUAL_SYNC, false);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedPrimarySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedPrimarySelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedPrimarySelfTest.java
new file mode 100644
index 0000000..f9ecc4b
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedPrimarySelfTest.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
+
+/**
+ * IGFS Hadoop file system IPC loopback self test in PRIMARY mode.
+ */
+public class IgniteHadoopFileSystemLoopbackEmbeddedPrimarySelfTest
+    extends IgniteHadoopFileSystemLoopbackAbstractSelfTest {
+    /**
+     * Constructor.
+     */
+    public IgniteHadoopFileSystemLoopbackEmbeddedPrimarySelfTest() {
+        super(PRIMARY, false);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedSecondarySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedSecondarySelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedSecondarySelfTest.java
new file mode 100644
index 0000000..719df6d
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedSecondarySelfTest.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.igfs.IgfsMode.PROXY;
+
+/**
+ * IGFS Hadoop file system IPC loopback self test in PROXY (secondary file system) mode.
+ */
+public class IgniteHadoopFileSystemLoopbackEmbeddedSecondarySelfTest extends
+    IgniteHadoopFileSystemLoopbackAbstractSelfTest {
+
+    /**
+     * Constructor.
+     */
+    public IgniteHadoopFileSystemLoopbackEmbeddedSecondarySelfTest() {
+        super(PROXY, false);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualAsyncSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualAsyncSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualAsyncSelfTest.java
new file mode 100644
index 0000000..764624d
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualAsyncSelfTest.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.igfs.IgfsMode.DUAL_ASYNC;
+
+/**
+ * IGFS Hadoop file system IPC loopback self test in DUAL_ASYNC mode.
+ */
+public class IgniteHadoopFileSystemLoopbackExternalDualAsyncSelfTest extends
+    IgniteHadoopFileSystemLoopbackAbstractSelfTest {
+    /**
+     * Constructor.
+     */
+    public IgniteHadoopFileSystemLoopbackExternalDualAsyncSelfTest() {
+        super(DUAL_ASYNC, true);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest.java
new file mode 100644
index 0000000..21a248a
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.igfs.IgfsMode.DUAL_SYNC;
+
+/**
+ * IGFS Hadoop file system IPC loopback self test in DUAL_SYNC mode.
+ */
+public class IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest
+    extends IgniteHadoopFileSystemLoopbackAbstractSelfTest {
+    /**
+     * Constructor.
+     */
+    public IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest() {
+        super(DUAL_SYNC, true);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest.java
new file mode 100644
index 0000000..092c7a5
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
+
+/**
+ * IGFS Hadoop file system IPC loopback self test in PRIMARY mode.
+ */
+public class IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest
+    extends IgniteHadoopFileSystemLoopbackAbstractSelfTest {
+    /**
+     * Constructor.
+     */
+    public IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest() {
+        super(PRIMARY, true);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest.java
new file mode 100644
index 0000000..9f7d21b
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.igfs.IgfsMode.PROXY;
+
+/**
+ * IGFS Hadoop file system IPC loopback self test in PROXY (secondary file system) mode.
+ */
+public class IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest extends
+    IgniteHadoopFileSystemLoopbackAbstractSelfTest {
+
+    /**
+     * Constructor.
+     */
+    public IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest() {
+        super(PROXY, true);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemSecondaryFileSystemInitializationSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemSecondaryFileSystemInitializationSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemSecondaryFileSystemInitializationSelfTest.java
new file mode 100644
index 0000000..1b48870
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemSecondaryFileSystemInitializationSelfTest.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.ignite.cache.CacheWriteSynchronizationMode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.FileSystemConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
+import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
+import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+
+import java.net.URI;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.cache.CacheMode.REPLICATED;
+import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
+
+/**
+ * Ensures correct mode resolution for SECONDARY paths.
+ */
+public class IgniteHadoopFileSystemSecondaryFileSystemInitializationSelfTest extends IgfsCommonAbstractTest {
+    /** File system. */
+    private IgniteHadoopFileSystem fs;
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        U.closeQuiet(fs);
+
+        fs = null;
+
+        G.stopAll(true);
+    }
+
+    /**
+     * Perform initial startup.
+     *
+     * @param initDfltPathModes Whether to initialize default path modes.
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings({"NullableProblems", "unchecked"})
+    private void startUp(boolean initDfltPathModes) throws Exception {
+        startUpSecondary();
+
+        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
+
+        igfsCfg.setDataCacheName("partitioned");
+        igfsCfg.setMetaCacheName("replicated");
+        igfsCfg.setName("igfs");
+        igfsCfg.setBlockSize(512 * 1024);
+        igfsCfg.setInitializeDefaultPathModes(initDfltPathModes);
+
+        IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
+
+        endpointCfg.setType(IgfsIpcEndpointType.TCP);
+        endpointCfg.setPort(10500);
+
+        igfsCfg.setIpcEndpointConfiguration(endpointCfg);
+
+        igfsCfg.setManagementPort(-1);
+        igfsCfg.setSecondaryFileSystem(new IgniteHadoopIgfsSecondaryFileSystem(
+            "igfs://igfs-secondary:igfs-grid-secondary@127.0.0.1:11500/",
+            "modules/core/src/test/config/hadoop/core-site-loopback-secondary.xml"));
+
+        CacheConfiguration cacheCfg = defaultCacheConfiguration();
+
+        cacheCfg.setName("partitioned");
+        cacheCfg.setCacheMode(PARTITIONED);
+        cacheCfg.setNearConfiguration(null);
+        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(128));
+        cacheCfg.setBackups(0);
+        cacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
+
+        metaCacheCfg.setName("replicated");
+        metaCacheCfg.setCacheMode(REPLICATED);
+        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        IgniteConfiguration cfg = new IgniteConfiguration();
+
+        cfg.setGridName("igfs-grid");
+
+        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+
+        discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
+
+        cfg.setDiscoverySpi(discoSpi);
+        cfg.setCacheConfiguration(metaCacheCfg, cacheCfg);
+        cfg.setFileSystemConfiguration(igfsCfg);
+
+        cfg.setLocalHost("127.0.0.1");
+
+        G.start(cfg);
+
+        Configuration fsCfg = new Configuration();
+
+        fsCfg.addResource(U.resolveIgniteUrl("modules/core/src/test/config/hadoop/core-site-loopback.xml"));
+
+        fsCfg.setBoolean("fs.igfs.impl.disable.cache", true);
+
+        fs = (IgniteHadoopFileSystem)FileSystem.get(new URI("igfs://igfs:igfs-grid@/"), fsCfg);
+    }
+
+    /**
+     * Start up the secondary file system.
+     *
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("unchecked")
+    private void startUpSecondary() throws Exception {
+        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
+
+        igfsCfg.setDataCacheName("partitioned");
+        igfsCfg.setMetaCacheName("replicated");
+        igfsCfg.setName("igfs-secondary");
+        igfsCfg.setBlockSize(512 * 1024);
+        igfsCfg.setDefaultMode(PRIMARY);
+
+        IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
+
+        endpointCfg.setType(IgfsIpcEndpointType.TCP);
+        endpointCfg.setPort(11500);
+
+        igfsCfg.setIpcEndpointConfiguration(endpointCfg);
+
+        CacheConfiguration cacheCfg = defaultCacheConfiguration();
+
+        cacheCfg.setName("partitioned");
+        cacheCfg.setCacheMode(PARTITIONED);
+        cacheCfg.setNearConfiguration(null);
+        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(128));
+        cacheCfg.setBackups(0);
+        cacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
+
+        metaCacheCfg.setName("replicated");
+        metaCacheCfg.setCacheMode(REPLICATED);
+        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        IgniteConfiguration cfg = new IgniteConfiguration();
+
+        cfg.setGridName("igfs-grid-secondary");
+
+        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+
+        discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
+
+        cfg.setDiscoverySpi(discoSpi);
+        cfg.setCacheConfiguration(metaCacheCfg, cacheCfg);
+        cfg.setFileSystemConfiguration(igfsCfg);
+
+        cfg.setLocalHost("127.0.0.1");
+
+        G.start(cfg);
+    }
+
+    /**
+     * Test scenario when defaults are initialized.
+     *
+     * @throws Exception If failed.
+     */
+    public void testDefaultsInitialized() throws Exception {
+        check(true);
+    }
+
+    /**
+     * Test scenario when defaults are not initialized.
+     *
+     * @throws Exception If failed.
+     */
+    public void testDefaultsNotInitialized() throws Exception {
+        check(false);
+    }
+
+    /**
+     * Actual check.
+     *
+     * @param initDfltPathModes Whether to initialize default path modes.
+     * @throws Exception If failed.
+     */
+    private void check(boolean initDfltPathModes) throws Exception {
+        startUp(initDfltPathModes);
+
+        assertEquals(initDfltPathModes, fs.hasSecondaryFileSystem());
+    }
+}
\ No newline at end of file
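
For context, the test above asserts that IgniteHadoopFileSystem.hasSecondaryFileSystem()
tracks the initializeDefaultPathModes flag. As a hedged sketch (not part of the patch),
a plain Hadoop client would connect to the IGFS instance started by startUp() roughly
like this, assuming the URI and config keys used in the test ("/demo" is a placeholder
path):

    // Uses org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem,
    // org.apache.hadoop.fs.Path and java.net.URI.
    Configuration conf = new Configuration();

    conf.set("fs.igfs.impl", "org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem");
    conf.setBoolean("fs.igfs.impl.disable.cache", true); // Avoid cached instances.

    FileSystem fs = FileSystem.get(new URI("igfs://igfs:igfs-grid@/"), conf);

    fs.mkdirs(new Path("/demo")); // Placeholder path.

    fs.close();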

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemAbstractSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemAbstractSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemAbstractSelfTest.java
new file mode 100644
index 0000000..d8cf74c
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemAbstractSelfTest.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import java.util.Collection;
+import java.util.LinkedList;
+import java.util.concurrent.Callable;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.util.ipc.IpcEndpoint;
+import org.apache.ignite.internal.util.ipc.IpcEndpointFactory;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.testframework.GridTestUtils;
+
+import static org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT;
+
+/**
+ * IGFS Hadoop file system IPC self test.
+ */
+public abstract class IgniteHadoopFileSystemShmemAbstractSelfTest extends IgniteHadoopFileSystemAbstractSelfTest {
+    /**
+     * Constructor.
+     *
+     * @param mode IGFS mode.
+     * @param skipEmbed Skip embedded mode flag.
+     */
+    protected IgniteHadoopFileSystemShmemAbstractSelfTest(IgfsMode mode, boolean skipEmbed) {
+        super(mode, skipEmbed, false);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgfsIpcEndpointConfiguration primaryIpcEndpointConfiguration(final String gridName) {
+        IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
+
+        endpointCfg.setType(IgfsIpcEndpointType.SHMEM);
+        endpointCfg.setPort(DFLT_IPC_PORT + getTestGridIndex(gridName));
+
+        return endpointCfg;
+    }
+
+    /**
+     * Checks correct behaviour when the system runs out of resources.
+     *
+     * @throws Exception If error occurred.
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testOutOfResources() throws Exception {
+        final Collection<IpcEndpoint> eps = new LinkedList<>();
+
+        try {
+            IgniteCheckedException e = (IgniteCheckedException)GridTestUtils.assertThrows(log, new Callable<Object>() {
+                @SuppressWarnings("InfiniteLoopStatement")
+                @Override public Object call() throws Exception {
+                    while (true) {
+                        IpcEndpoint ep = IpcEndpointFactory.connectEndpoint("shmem:10500", log);
+
+                        eps.add(ep);
+                    }
+                }
+            }, IgniteCheckedException.class, null);
+
+            assertNotNull(e);
+
+            String msg = e.getMessage();
+
+            assertTrue("Invalid exception: " + X.getFullStackTrace(e),
+                msg.contains("(error code: 28)") ||
+                msg.contains("(error code: 24)") ||
+                msg.contains("(error code: 12)"));
+        }
+        finally {
+            for (IpcEndpoint ep : eps)
+                ep.close();
+        }
+    }
+}
\ No newline at end of file
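
A note on the assertion in testOutOfResources(): the matched error codes appear to be
POSIX errno values surfaced when shared memory allocation fails. The mapping below is
an assumption based on standard Linux errno numbering; the test itself only matches
the message text:

    /** Assumed mapping of the asserted codes to standard Linux errno values. */
    static String describeErrno(int code) {
        switch (code) {
            case 12: return "ENOMEM: cannot allocate memory";
            case 24: return "EMFILE: too many open files";
            case 28: return "ENOSPC: no space left on device";
            default: return "unexpected error code: " + code;
        }
    }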

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualAsyncSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualAsyncSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualAsyncSelfTest.java
new file mode 100644
index 0000000..d0d570f
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualAsyncSelfTest.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.igfs.IgfsMode.DUAL_ASYNC;
+
+/**
+ * IGFS Hadoop file system IPC shmem self test in DUAL_ASYNC mode.
+ */
+public class IgniteHadoopFileSystemShmemEmbeddedDualAsyncSelfTest
+    extends IgniteHadoopFileSystemShmemAbstractSelfTest {
+    /**
+     * Constructor.
+     */
+    public IgniteHadoopFileSystemShmemEmbeddedDualAsyncSelfTest() {
+        super(DUAL_ASYNC, false);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualSyncSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualSyncSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualSyncSelfTest.java
new file mode 100644
index 0000000..2e5b015
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualSyncSelfTest.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.igfs.IgfsMode.DUAL_SYNC;
+
+/**
+ * IGFS Hadoop file system IPC shmem self test in DUAL_SYNC mode.
+ */
+public class IgniteHadoopFileSystemShmemEmbeddedDualSyncSelfTest
+    extends IgniteHadoopFileSystemShmemAbstractSelfTest {
+    /**
+     * Constructor.
+     */
+    public IgniteHadoopFileSystemShmemEmbeddedDualSyncSelfTest() {
+        super(DUAL_SYNC, false);
+    }
+}
\ No newline at end of file
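
The two concrete suites above differ only in the IGFS mode they pass to the abstract
shmem test. For reference, outside the test harness the mode is set directly on the
file system configuration; a brief sketch (the comments summarize documented mode
semantics, not something these tests assert):

    FileSystemConfiguration igfsCfg = new FileSystemConfiguration();

    // DUAL_SYNC: writes block until the secondary file system is updated.
    // DUAL_ASYNC: updates are propagated to the secondary file system asynchronously.
    igfsCfg.setDefaultMode(IgfsMode.DUAL_SYNC);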


http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolSelfTest.java
deleted file mode 100644
index 1344e26..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolSelfTest.java
+++ /dev/null
@@ -1,654 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.client.hadoop;
-
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
-import java.util.StringTokenizer;
-import java.util.UUID;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Counter;
-import org.apache.hadoop.mapreduce.Counters;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.mapreduce.JobStatus;
-import org.apache.hadoop.mapreduce.MRConfig;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.OutputCommitter;
-import org.apache.hadoop.mapreduce.Reducer;
-import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
-import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
-import org.apache.ignite.IgniteFileSystem;
-import org.apache.ignite.hadoop.mapreduce.IgniteHadoopClientProtocolProvider;
-import org.apache.ignite.igfs.IgfsFile;
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.internal.processors.hadoop.HadoopAbstractSelfTest;
-import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
-import org.apache.ignite.internal.util.lang.GridAbsPredicate;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.testframework.GridTestUtils;
-
-/**
- * Hadoop client protocol tests in external process mode.
- */
-@SuppressWarnings("ResultOfMethodCallIgnored")
-public class HadoopClientProtocolSelfTest extends HadoopAbstractSelfTest {
-    /** Input path. */
-    private static final String PATH_INPUT = "/input";
-
-    /** Output path. */
-    private static final String PATH_OUTPUT = "/output";
-
-    /** Job name. */
-    private static final String JOB_NAME = "myJob";
-
-    /** Setup lock file. */
-    private static File setupLockFile = new File(U.isWindows() ? System.getProperty("java.io.tmpdir") : "/tmp",
-        "ignite-lock-setup.file");
-
-    /** Map lock file. */
-    private static File mapLockFile = new File(U.isWindows() ? System.getProperty("java.io.tmpdir") : "/tmp",
-        "ignite-lock-map.file");
-
-    /** Reduce lock file. */
-    private static File reduceLockFile = new File(U.isWindows() ? System.getProperty("java.io.tmpdir") : "/tmp",
-        "ignite-lock-reduce.file");
-
-    /** {@inheritDoc} */
-    @Override protected int gridCount() {
-        return 2;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected boolean igfsEnabled() {
-        return true;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected boolean restEnabled() {
-        return true;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTestsStarted() throws Exception {
-        super.beforeTestsStarted();
-
-        startGrids(gridCount());
-
-        setupLockFile.delete();
-        mapLockFile.delete();
-        reduceLockFile.delete();
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTestsStopped() throws Exception {
-        stopAllGrids();
-
-        super.afterTestsStopped();
-
-//        IgniteHadoopClientProtocolProvider.cliMap.clear();
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTest() throws Exception {
-        setupLockFile.createNewFile();
-        mapLockFile.createNewFile();
-        reduceLockFile.createNewFile();
-
-        setupLockFile.deleteOnExit();
-        mapLockFile.deleteOnExit();
-        reduceLockFile.deleteOnExit();
-
-        super.beforeTest();
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTest() throws Exception {
-        grid(0).fileSystem(HadoopAbstractSelfTest.igfsName).format();
-
-        setupLockFile.delete();
-        mapLockFile.delete();
-        reduceLockFile.delete();
-
-        super.afterTest();
-    }
-
-    /**
-     * Test next job ID generation.
-     *
-     * @throws Exception If failed.
-     */
-    @SuppressWarnings("ConstantConditions")
-    private void tstNextJobId() throws Exception {
-        IgniteHadoopClientProtocolProvider provider = provider();
-
-        ClientProtocol proto = provider.create(config(HadoopAbstractSelfTest.REST_PORT));
-
-        JobID jobId = proto.getNewJobID();
-
-        assert jobId != null;
-        assert jobId.getJtIdentifier() != null;
-
-        JobID nextJobId = proto.getNewJobID();
-
-        assert nextJobId != null;
-        assert nextJobId.getJtIdentifier() != null;
-
-        assert !F.eq(jobId, nextJobId);
-    }
-
-    /**
-     * Tests job counters retrieval.
-     *
-     * @throws Exception If failed.
-     */
-    public void testJobCounters() throws Exception {
-        IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);
-
-        igfs.mkdirs(new IgfsPath(PATH_INPUT));
-
-        try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create(
-            new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
-
-            bw.write(
-                "alpha\n" +
-                "beta\n" +
-                "gamma\n" +
-                "alpha\n" +
-                "beta\n" +
-                "gamma\n" +
-                "alpha\n" +
-                "beta\n" +
-                "gamma\n"
-            );
-        }
-
-        Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);
-
-        final Job job = Job.getInstance(conf);
-
-        job.setOutputKeyClass(Text.class);
-        job.setOutputValueClass(IntWritable.class);
-
-        job.setMapperClass(TestCountingMapper.class);
-        job.setReducerClass(TestCountingReducer.class);
-        job.setCombinerClass(TestCountingCombiner.class);
-
-        FileInputFormat.setInputPaths(job, new Path(PATH_INPUT));
-        FileOutputFormat.setOutputPath(job, new Path(PATH_OUTPUT));
-
-        job.submit();
-
-        final Counter cntr = job.getCounters().findCounter(TestCounter.COUNTER1);
-
-        assertEquals(0, cntr.getValue());
-
-        cntr.increment(10);
-
-        assertEquals(10, cntr.getValue());
-
-        // Transferring to map phase.
-        setupLockFile.delete();
-
-        // Transferring to reduce phase.
-        mapLockFile.delete();
-
-        job.waitForCompletion(false);
-
-        assertEquals("job must end successfully", JobStatus.State.SUCCEEDED, job.getStatus().getState());
-
-        final Counters counters = job.getCounters();
-
-        assertNotNull("counters cannot be null", counters);
-        assertEquals("wrong counters count", 3, counters.countCounters());
-        assertEquals("wrong counter value", 15, counters.findCounter(TestCounter.COUNTER1).getValue());
-        assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER2).getValue());
-        assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER3).getValue());
-    }
-
-    /**
-     * Tests job counters retrieval for unknown job id.
-     *
-     * @throws Exception If failed.
-     */
-    private void tstUnknownJobCounters() throws Exception {
-        IgniteHadoopClientProtocolProvider provider = provider();
-
-        ClientProtocol proto = provider.create(config(HadoopAbstractSelfTest.REST_PORT));
-
-        try {
-            proto.getJobCounters(new JobID(UUID.randomUUID().toString(), -1));
-            fail("exception must be thrown");
-        }
-        catch (Exception e) {
-            assert e instanceof IOException : "wrong error has been thrown";
-        }
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    private void tstJobSubmitMap() throws Exception {
-        checkJobSubmit(true, true);
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    private void tstJobSubmitMapCombine() throws Exception {
-        checkJobSubmit(false, true);
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    private void tstJobSubmitMapReduce() throws Exception {
-        checkJobSubmit(true, false);
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    private void tstJobSubmitMapCombineReduce() throws Exception {
-        checkJobSubmit(false, false);
-    }
-
-    /**
-     * Test job submission.
-     *
-     * @param noCombiners Whether there are no combiners.
-     * @param noReducers Whether there are no reducers.
-     * @throws Exception If failed.
-     */
-    public void checkJobSubmit(boolean noCombiners, boolean noReducers) throws Exception {
-        IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);
-
-        igfs.mkdirs(new IgfsPath(PATH_INPUT));
-
-        try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create(
-            new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
-
-            bw.write("word");
-        }
-
-        Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);
-
-        final Job job = Job.getInstance(conf);
-
-        job.setJobName(JOB_NAME);
-
-        job.setOutputKeyClass(Text.class);
-        job.setOutputValueClass(IntWritable.class);
-
-        job.setMapperClass(TestMapper.class);
-        job.setReducerClass(TestReducer.class);
-
-        if (!noCombiners)
-            job.setCombinerClass(TestCombiner.class);
-
-        if (noReducers)
-            job.setNumReduceTasks(0);
-
-        job.setInputFormatClass(TextInputFormat.class);
-        job.setOutputFormatClass(TestOutputFormat.class);
-
-        FileInputFormat.setInputPaths(job, new Path(PATH_INPUT));
-        FileOutputFormat.setOutputPath(job, new Path(PATH_OUTPUT));
-
-        job.submit();
-
-        JobID jobId = job.getJobID();
-
-        // Setup phase.
-        JobStatus jobStatus = job.getStatus();
-        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
-        assert jobStatus.getSetupProgress() >= 0.0f && jobStatus.getSetupProgress() < 1.0f;
-        assert jobStatus.getMapProgress() == 0.0f;
-        assert jobStatus.getReduceProgress() == 0.0f;
-
-        U.sleep(2100);
-
-        JobStatus recentJobStatus = job.getStatus();
-
-        assert recentJobStatus.getSetupProgress() > jobStatus.getSetupProgress() :
-            "Old=" + jobStatus.getSetupProgress() + ", new=" + recentJobStatus.getSetupProgress();
-
-        // Transferring to map phase.
-        setupLockFile.delete();
-
-        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
-            @Override public boolean apply() {
-                try {
-                    return F.eq(1.0f, job.getStatus().getSetupProgress());
-                }
-                catch (Exception e) {
-                    throw new RuntimeException("Unexpected exception.", e);
-                }
-            }
-        }, 5000L);
-
-        // Map phase.
-        jobStatus = job.getStatus();
-        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
-        assert jobStatus.getSetupProgress() == 1.0f;
-        assert jobStatus.getMapProgress() >= 0.0f && jobStatus.getMapProgress() < 1.0f;
-        assert jobStatus.getReduceProgress() == 0.0f;
-
-        U.sleep(2100);
-
-        recentJobStatus = job.getStatus();
-
-        assert recentJobStatus.getMapProgress() > jobStatus.getMapProgress() :
-            "Old=" + jobStatus.getMapProgress() + ", new=" + recentJobStatus.getMapProgress();
-
-        // Transferring to reduce phase.
-        mapLockFile.delete();
-
-        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
-            @Override public boolean apply() {
-                try {
-                    return F.eq(1.0f, job.getStatus().getMapProgress());
-                }
-                catch (Exception e) {
-                    throw new RuntimeException("Unexpected exception.", e);
-                }
-            }
-        }, 5000L);
-
-        if (!noReducers) {
-            // Reduce phase.
-            jobStatus = job.getStatus();
-            checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
-            assert jobStatus.getSetupProgress() == 1.0f;
-            assert jobStatus.getMapProgress() == 1.0f;
-            assert jobStatus.getReduceProgress() >= 0.0f && jobStatus.getReduceProgress() < 1.0f;
-
-            // Ensure that reduces progress increases.
-            U.sleep(2100);
-
-            recentJobStatus = job.getStatus();
-
-            assert recentJobStatus.getReduceProgress() > jobStatus.getReduceProgress() :
-                "Old=" + jobStatus.getReduceProgress() + ", new=" + recentJobStatus.getReduceProgress();
-
-            reduceLockFile.delete();
-        }
-
-        job.waitForCompletion(false);
-
-        jobStatus = job.getStatus();
-        checkJobStatus(job.getStatus(), jobId, JOB_NAME, JobStatus.State.SUCCEEDED, 1.0f);
-        assert jobStatus.getSetupProgress() == 1.0f;
-        assert jobStatus.getMapProgress() == 1.0f;
-        assert jobStatus.getReduceProgress() == 1.0f;
-
-        dumpIgfs(igfs, new IgfsPath(PATH_OUTPUT));
-    }
-
-    /**
-     * Dump IGFS content.
-     *
-     * @param igfs IGFS.
-     * @param path Path.
-     * @throws Exception If failed.
-     */
-    @SuppressWarnings("ConstantConditions")
-    private static void dumpIgfs(IgniteFileSystem igfs, IgfsPath path) throws Exception {
-        IgfsFile file = igfs.info(path);
-
-        assert file != null;
-
-        System.out.println(file.path());
-
-        if (file.isDirectory()) {
-            for (IgfsPath child : igfs.listPaths(path))
-                dumpIgfs(igfs, child);
-        }
-        else {
-            try (BufferedReader br = new BufferedReader(new InputStreamReader(igfs.open(path)))) {
-                String line = br.readLine();
-
-                while (line != null) {
-                    System.out.println(line);
-
-                    line = br.readLine();
-                }
-            }
-        }
-    }
-
-    /**
-     * Check job status.
-     *
-     * @param status Job status.
-     * @param expJobId Expected job ID.
-     * @param expJobName Expected job name.
-     * @param expState Expected state.
-     * @param expCleanupProgress Expected cleanup progress.
-     * @throws Exception If failed.
-     */
-    private static void checkJobStatus(JobStatus status, JobID expJobId, String expJobName,
-        JobStatus.State expState, float expCleanupProgress) throws Exception {
-        assert F.eq(status.getJobID(), expJobId) : "Expected=" + expJobId + ", actual=" + status.getJobID();
-        assert F.eq(status.getJobName(), expJobName) : "Expected=" + expJobName + ", actual=" + status.getJobName();
-        assert F.eq(status.getState(), expState) : "Expected=" + expState + ", actual=" + status.getState();
-        assert F.eq(status.getCleanupProgress(), expCleanupProgress) :
-            "Expected=" + expCleanupProgress + ", actual=" + status.getCleanupProgress();
-    }
-
-    /**
-     * @return Configuration.
-     */
-    private Configuration config(int port) {
-        Configuration conf = HadoopUtils.safeCreateConfiguration();
-
-        setupFileSystems(conf);
-
-        conf.set(MRConfig.FRAMEWORK_NAME, IgniteHadoopClientProtocolProvider.FRAMEWORK_NAME);
-        conf.set(MRConfig.MASTER_ADDRESS, "127.0.0.1:" + port);
-
-        conf.set("fs.defaultFS", "igfs://:" + getTestGridName(0) + "@/");
-
-        return conf;
-    }
-
-    /**
-     * @return Protocol provider.
-     */
-    private IgniteHadoopClientProtocolProvider provider() {
-        return new IgniteHadoopClientProtocolProvider();
-    }
-
-    /**
-     * Test mapper.
-     */
-    public static class TestMapper extends Mapper<Object, Text, Text, IntWritable> {
-        /** Writable container for the current word. */
-        private Text word = new Text();
-
-        /** Writable integer constant of '1', written as the count for each found word. */
-        private static final IntWritable one = new IntWritable(1);
-
-        /** {@inheritDoc} */
-        @Override public void map(Object key, Text val, Context ctx) throws IOException, InterruptedException {
-            while (mapLockFile.exists())
-                Thread.sleep(50);
-
-            StringTokenizer wordList = new StringTokenizer(val.toString());
-
-            while (wordList.hasMoreTokens()) {
-                word.set(wordList.nextToken());
-
-                ctx.write(word, one);
-            }
-        }
-    }
-
-    /**
-     * Test Hadoop counters.
-     */
-    public enum TestCounter {
-        COUNTER1, COUNTER2, COUNTER3
-    }
-
-    /**
-     * Test mapper that uses counters.
-     */
-    public static class TestCountingMapper extends TestMapper {
-        /** {@inheritDoc} */
-        @Override public void map(Object key, Text val, Context ctx) throws IOException, InterruptedException {
-            super.map(key, val, ctx);
-            ctx.getCounter(TestCounter.COUNTER1).increment(1);
-        }
-    }
-
-    /**
-     * Test combiner that counts invocations.
-     */
-    public static class TestCountingCombiner extends TestReducer {
-        /** {@inheritDoc} */
-        @Override public void reduce(Text key, Iterable<IntWritable> values,
-            Context ctx) throws IOException, InterruptedException {
-            ctx.getCounter(TestCounter.COUNTER1).increment(1);
-            ctx.getCounter(TestCounter.COUNTER2).increment(1);
-
-            int sum = 0;
-            for (IntWritable value : values)
-                sum += value.get();
-
-            ctx.write(key, new IntWritable(sum));
-        }
-    }
-
-    /**
-     * Test reducer that counts invocations.
-     */
-    public static class TestCountingReducer extends TestReducer {
-        /** {@inheritDoc} */
-        @Override public void reduce(Text key, Iterable<IntWritable> values,
-            Context ctx) throws IOException, InterruptedException {
-            ctx.getCounter(TestCounter.COUNTER1).increment(1);
-            ctx.getCounter(TestCounter.COUNTER3).increment(1);
-        }
-    }
-
-    /**
-     * Test combiner.
-     */
-    public static class TestCombiner extends Reducer<Text, IntWritable, Text, IntWritable> {
-        // No-op.
-    }
-
-    /**
-     * Test output format.
-     */
-    public static class TestOutputFormat<K, V> extends TextOutputFormat<K, V> {
-        /** {@inheritDoc} */
-        @Override public synchronized OutputCommitter getOutputCommitter(TaskAttemptContext ctx)
-            throws IOException {
-            return new TestOutputCommitter(ctx, (FileOutputCommitter)super.getOutputCommitter(ctx));
-        }
-    }
-
-    /**
-     * Test output committer.
-     */
-    private static class TestOutputCommitter extends FileOutputCommitter {
-        /** Delegate. */
-        private final FileOutputCommitter delegate;
-
-        /**
-         * Constructor.
-         *
-         * @param ctx Task attempt context.
-         * @param delegate Delegate.
-         * @throws IOException If failed.
-         */
-        private TestOutputCommitter(TaskAttemptContext ctx, FileOutputCommitter delegate) throws IOException {
-            super(FileOutputFormat.getOutputPath(ctx), ctx);
-
-            this.delegate = delegate;
-        }
-
-        /** {@inheritDoc} */
-        @Override public void setupJob(JobContext jobCtx) throws IOException {
-            try {
-                while (setupLockFile.exists())
-                    Thread.sleep(50);
-            }
-            catch (InterruptedException ignored) {
-                throw new IOException("Interrupted.");
-            }
-
-            delegate.setupJob(jobCtx);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void setupTask(TaskAttemptContext taskCtx) throws IOException {
-            delegate.setupTask(taskCtx);
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean needsTaskCommit(TaskAttemptContext taskCtx) throws IOException {
-            return delegate.needsTaskCommit(taskCtx);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void commitTask(TaskAttemptContext taskCtx) throws IOException {
-            delegate.commitTask(taskCtx);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void abortTask(TaskAttemptContext taskCtx) throws IOException {
-            delegate.abortTask(taskCtx);
-        }
-    }
-
-    /**
-     * Test reducer.
-     */
-    public static class TestReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
-        /** Writable container for the sum of word counts. */
-        private IntWritable totalWordCnt = new IntWritable();
-
-        /** {@inheritDoc} */
-        @Override public void reduce(Text key, Iterable<IntWritable> values, Context ctx) throws IOException,
-            InterruptedException {
-            while (reduceLockFile.exists())
-                Thread.sleep(50);
-
-            int wordCnt = 0;
-
-            for (IntWritable value : values)
-                wordCnt += value.get();
-
-            totalWordCnt.set(wordCnt);
-
-            ctx.write(key, totalWordCnt);
-        }
-    }
-}
\ No newline at end of file
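
For context on the deleted HadoopClientProtocolSelfTest: it routed vanilla MapReduce
jobs to Ignite through two configuration keys, as its config() helper shows. A
condensed sketch (the port below is an assumption; the test used its own REST_PORT
constant):

    Configuration conf = new Configuration();

    conf.set(MRConfig.FRAMEWORK_NAME, IgniteHadoopClientProtocolProvider.FRAMEWORK_NAME);
    conf.set(MRConfig.MASTER_ADDRESS, "127.0.0.1:11211"); // Assumed REST port.

    Job job = Job.getInstance(conf);

    // Configure mapper/reducer and input/output paths as usual, then:
    job.submit();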

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/hadoop/cache/HadoopTxConfigCacheTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/hadoop/cache/HadoopTxConfigCacheTest.java b/modules/hadoop/src/test/java/org/apache/ignite/hadoop/cache/HadoopTxConfigCacheTest.java
deleted file mode 100644
index 6f910f1..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/hadoop/cache/HadoopTxConfigCacheTest.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.hadoop.cache;
-
-import org.apache.ignite.Ignite;
-import org.apache.ignite.internal.processors.cache.IgniteInternalCache;
-import org.apache.ignite.internal.processors.cache.IgniteTxConfigCacheSelfTest;
-import org.apache.ignite.internal.util.typedef.internal.CU;
-
-/**
- * Checks that the Hadoop system cache does not use the user-defined TX config.
- */
-public class HadoopTxConfigCacheTest extends IgniteTxConfigCacheSelfTest {
-    /**
-     * Succeeds if system cache transactions do not time out.
-     *
-     * @throws Exception If failed.
-     */
-    public void testSystemCacheTx() throws Exception {
-        final Ignite ignite = grid(0);
-
-        final IgniteInternalCache<Object, Object> hadoopCache = getSystemCache(ignite, CU.SYS_CACHE_HADOOP_MR);
-
-        checkImplicitTxSuccess(hadoopCache);
-        checkStartTxSuccess(hadoopCache);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/hadoop/fs/KerberosHadoopFileSystemFactorySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/hadoop/fs/KerberosHadoopFileSystemFactorySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/hadoop/fs/KerberosHadoopFileSystemFactorySelfTest.java
deleted file mode 100644
index ea7fa99..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/hadoop/fs/KerberosHadoopFileSystemFactorySelfTest.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.hadoop.fs;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.ObjectInput;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutput;
-import java.io.ObjectOutputStream;
-import java.util.concurrent.Callable;
-
-import org.apache.ignite.testframework.GridTestUtils;
-import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
-import org.junit.Assert;
-
-/**
- * Tests KerberosHadoopFileSystemFactory.
- */
-public class KerberosHadoopFileSystemFactorySelfTest extends GridCommonAbstractTest {
-    /**
-     * Test parameters validation.
-     *
-     * @throws Exception If failed.
-     */
-    public void testParameters() throws Exception {
-        checkParameters(null, null, -1);
-
-        checkParameters(null, null, 100);
-        checkParameters(null, "b", -1);
-        checkParameters("a", null, -1);
-
-        checkParameters(null, "b", 100);
-        checkParameters("a", null, 100);
-        checkParameters("a", "b", -1);
-    }
-
-    /**
-     * Check parameters.
-     *
-     * @param keyTab Key tab.
-     * @param keyTabPrincipal Key tab principal.
-     * @param reloginInterval Re-login interval.
-     */
-    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
-    private void checkParameters(String keyTab, String keyTabPrincipal, long reloginInterval) {
-        final KerberosHadoopFileSystemFactory fac = new KerberosHadoopFileSystemFactory();
-
-        fac.setKeyTab(keyTab);
-        fac.setKeyTabPrincipal(keyTabPrincipal);
-        fac.setReloginInterval(reloginInterval);
-
-        GridTestUtils.assertThrows(null, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                fac.start();
-
-                return null;
-            }
-        }, IllegalArgumentException.class, null);
-    }
-
-    /**
-     * Checks serialization and deserialization of the secure factory.
-     *
-     * @throws Exception If failed.
-     */
-    public void testSerialization() throws Exception {
-        KerberosHadoopFileSystemFactory fac = new KerberosHadoopFileSystemFactory();
-
-        checkSerialization(fac);
-
-        fac = new KerberosHadoopFileSystemFactory();
-
-        fac.setUri("igfs://igfs@localhost:10500/");
-        fac.setConfigPaths("/a/core-sute.xml", "/b/mapred-site.xml");
-        fac.setKeyTabPrincipal("foo");
-        fac.setKeyTab("/etc/krb5.keytab");
-        fac.setReloginInterval(30 * 60 * 1000L);
-
-        checkSerialization(fac);
-    }
-
-    /**
-     * Serializes and deserializes the factory, checking that all properties survive the round trip.
-     *
-     * @param fac The factory to check.
-     * @throws Exception If failed.
-     */
-    private void checkSerialization(KerberosHadoopFileSystemFactory fac) throws Exception {
-        ByteArrayOutputStream baos = new ByteArrayOutputStream();
-
-        ObjectOutput oo = new ObjectOutputStream(baos);
-
-        oo.writeObject(fac);
-
-        ObjectInput in = new ObjectInputStream(new ByteArrayInputStream(baos.toByteArray()));
-
-        KerberosHadoopFileSystemFactory fac2 = (KerberosHadoopFileSystemFactory)in.readObject();
-
-        assertEquals(fac.getUri(), fac2.getUri());
-        Assert.assertArrayEquals(fac.getConfigPaths(), fac2.getConfigPaths());
-        assertEquals(fac.getKeyTab(), fac2.getKeyTab());
-        assertEquals(fac.getKeyTabPrincipal(), fac2.getKeyTabPrincipal());
-        assertEquals(fac.getReloginInterval(), fac2.getReloginInterval());
-    }
-}
\ No newline at end of file
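
Judging from checkParameters() above, KerberosHadoopFileSystemFactory.start() rejects
any setup where the key tab, the principal, or a positive relogin interval is missing.
A hedged sketch of a configuration that should pass validation, using only setters
visible in this diff (endpoint and principal are placeholders):

    KerberosHadoopFileSystemFactory fac = new KerberosHadoopFileSystemFactory();

    fac.setUri("hdfs://namenode.example.com:9000/"); // Placeholder endpoint.
    fac.setKeyTab("/etc/krb5.keytab");
    fac.setKeyTabPrincipal("ignite@EXAMPLE.COM");    // Placeholder principal.
    fac.setReloginInterval(10 * 60 * 1000L);         // 10 minutes.

    IgniteHadoopIgfsSecondaryFileSystem secondary = new IgniteHadoopIgfsSecondaryFileSystem();

    secondary.setFileSystemFactory(fac);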

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/hadoop/util/BasicUserNameMapperSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/hadoop/util/BasicUserNameMapperSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/hadoop/util/BasicUserNameMapperSelfTest.java
deleted file mode 100644
index fd8fdef..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/hadoop/util/BasicUserNameMapperSelfTest.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.hadoop.util;
-
-import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
-import org.jetbrains.annotations.Nullable;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Test for basic user name mapper.
- */
-public class BasicUserNameMapperSelfTest extends GridCommonAbstractTest {
-    /**
-     * Test null mappings.
-     *
-     * @throws Exception If failed.
-     */
-    public void testNullMappings() throws Exception {
-        checkNullOrEmptyMappings(null);
-    }
-
-    /**
-     * Test empty mappings.
-     *
-     * @throws Exception If failed.
-     */
-    public void testEmptyMappings() throws Exception {
-        checkNullOrEmptyMappings(new HashMap<String, String>());
-    }
-
-    /**
-     * Check null or empty mappings.
-     *
-     * @param map Mappings.
-     * @throws Exception If failed.
-     */
-    private void checkNullOrEmptyMappings(@Nullable Map<String, String> map) throws Exception {
-        BasicUserNameMapper mapper = create(map, false, null);
-
-        assertNull(mapper.map(null));
-        assertEquals("1", mapper.map("1"));
-        assertEquals("2", mapper.map("2"));
-
-        mapper = create(map, true, null);
-
-        assertNull(mapper.map(null));
-        assertNull(mapper.map("1"));
-        assertNull(mapper.map("2"));
-
-        mapper = create(map, false, "A");
-
-        assertNull(mapper.map(null));
-        assertEquals("1", mapper.map("1"));
-        assertEquals("2", mapper.map("2"));
-
-        mapper = create(map, true, "A");
-
-        assertEquals("A", mapper.map(null));
-        assertEquals("A", mapper.map("1"));
-        assertEquals("A", mapper.map("2"));
-    }
-
-    /**
-     * Test regular mappings.
-     *
-     * @throws Exception If failed.
-     */
-    public void testMappings() throws Exception {
-        Map<String, String> map = new HashMap<>();
-
-        map.put("1", "101");
-
-        BasicUserNameMapper mapper = create(map, false, null);
-
-        assertNull(mapper.map(null));
-        assertEquals("101", mapper.map("1"));
-        assertEquals("2", mapper.map("2"));
-
-        mapper = create(map, true, null);
-
-        assertNull(mapper.map(null));
-        assertEquals("101", mapper.map("1"));
-        assertNull(mapper.map("2"));
-
-        mapper = create(map, false, "A");
-
-        assertNull(mapper.map(null));
-        assertEquals("101", mapper.map("1"));
-        assertEquals("2", mapper.map("2"));
-
-        mapper = create(map, true, "A");
-
-        assertEquals("A", mapper.map(null));
-        assertEquals("101", mapper.map("1"));
-        assertEquals("A", mapper.map("2"));
-    }
-
-    /**
-     * Create mapper.
-     *
-     * @param dictionary Dictionary.
-     * @param useDfltUsrName Whether to use default user name.
-     * @param dfltUsrName Default user name.
-     * @return Mapper.
-     */
-    private BasicUserNameMapper create(@Nullable Map<String, String> dictionary, boolean useDfltUsrName,
-        @Nullable String dfltUsrName) {
-        BasicUserNameMapper mapper = new BasicUserNameMapper();
-
-        mapper.setMappings(dictionary);
-        mapper.setUseDefaultUserName(useDfltUsrName);
-        mapper.setDefaultUserName(dfltUsrName);
-
-        return mapper;
-    }
-}
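
Condensing the contract the test above exercises into a usage sketch (user names are
placeholders):

    BasicUserNameMapper mapper = new BasicUserNameMapper();

    mapper.setMappings(Collections.singletonMap("alice", "alice_mapped"));
    mapper.setUseDefaultUserName(true);
    mapper.setDefaultUserName("anonymous");

    mapper.map("alice"); // -> "alice_mapped": an explicit mapping always wins.
    mapper.map("bob");   // -> "anonymous": no mapping, so the default name applies.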

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/hadoop/util/ChainedUserNameMapperSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/hadoop/util/ChainedUserNameMapperSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/hadoop/util/ChainedUserNameMapperSelfTest.java
deleted file mode 100644
index bfac49c..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/hadoop/util/ChainedUserNameMapperSelfTest.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.hadoop.util;
-
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.internal.processors.igfs.IgfsUtils;
-import org.apache.ignite.testframework.GridTestUtils;
-import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
-
-import java.util.Collections;
-import java.util.concurrent.Callable;
-
-/**
- * Tests for chained user name mapper.
- */
-public class ChainedUserNameMapperSelfTest extends GridCommonAbstractTest {
-    /** Test instance. */
-    private static final String INSTANCE = "test_instance";
-
-    /** Test realm. */
-    private static final String REALM = "test_realm";
-
-    /**
-     * Test case when mappers are null.
-     *
-     * @throws Exception If failed.
-     */
-    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
-    public void testNullMappers() throws Exception {
-        GridTestUtils.assertThrows(null, new Callable<Void>() {
-            @Override public Void call() throws Exception {
-                create((UserNameMapper[])null);
-
-                return null;
-            }
-        }, IgniteException.class, null);
-    }
-
-    /**
-     * Test case when one of mappers is null.
-     *
-     * @throws Exception If failed.
-     */
-    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
-    public void testNullMapperElement() throws Exception {
-        GridTestUtils.assertThrows(null, new Callable<Void>() {
-            @Override public Void call() throws Exception {
-                create(new BasicUserNameMapper(), null);
-
-                return null;
-            }
-        }, IgniteException.class, null);
-    }
-
-    /**
-     * Test actual chaining logic.
-     *
-     * @throws Exception If failed.
-     */
-    public void testChaining() throws Exception {
-        BasicUserNameMapper mapper1 = new BasicUserNameMapper();
-
-        mapper1.setMappings(Collections.singletonMap("1", "101"));
-
-        KerberosUserNameMapper mapper2 = new KerberosUserNameMapper();
-
-        mapper2.setInstance(INSTANCE);
-        mapper2.setRealm(REALM);
-
-        ChainedUserNameMapper mapper = create(mapper1, mapper2);
-
-        assertEquals("101" + "/" + INSTANCE + "@" + REALM, mapper.map("1"));
-        assertEquals("2" + "/" + INSTANCE + "@" + REALM, mapper.map("2"));
-        assertEquals(IgfsUtils.fixUserName(null) + "/" + INSTANCE + "@" + REALM, mapper.map(null));
-    }
-
-    /**
-     * Create chained mapper.
-     *
-     * @param mappers Child mappers.
-     * @return Chained mapper.
-     */
-    private ChainedUserNameMapper create(UserNameMapper... mappers) {
-        ChainedUserNameMapper mapper = new ChainedUserNameMapper();
-
-        mapper.setMappers(mappers);
-
-        mapper.start();
-
-        return mapper;
-    }
-}
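
One subtlety worth noting in testChaining() above: mappers run in the order they are
set, each receiving the previous mapper's output, which is why the dictionary mapping
is applied before the Kerberos decoration. A minimal sketch mirroring the test:

    BasicUserNameMapper first = new BasicUserNameMapper();

    first.setMappings(Collections.singletonMap("1", "101"));

    KerberosUserNameMapper second = new KerberosUserNameMapper();

    second.setInstance("test_instance");
    second.setRealm("test_realm");

    ChainedUserNameMapper mapper = new ChainedUserNameMapper();

    mapper.setMappers(first, second); // Applied left to right.

    mapper.start();

    mapper.map("1"); // -> "101/test_instance@test_realm"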

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/hadoop/util/KerberosUserNameMapperSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/hadoop/util/KerberosUserNameMapperSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/hadoop/util/KerberosUserNameMapperSelfTest.java
deleted file mode 100644
index cc685bb..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/hadoop/util/KerberosUserNameMapperSelfTest.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.hadoop.util;
-
-import org.apache.ignite.internal.processors.igfs.IgfsUtils;
-import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Tests for Kerberos name mapper.
- */
-public class KerberosUserNameMapperSelfTest extends GridCommonAbstractTest {
-    /** Test instance. */
-    private static final String INSTANCE = "test_instance";
-
-    /** Test realm. */
-    private static final String REALM = "test_realm";
-
-    /**
-     * Test mapper without instance and realm components.
-     *
-     * @throws Exception If failed.
-     */
-    public void testMapper() throws Exception {
-        KerberosUserNameMapper mapper = create(null, null);
-
-        assertEquals(IgfsUtils.fixUserName(null), mapper.map(null));
-        assertEquals("test", mapper.map("test"));
-    }
-
-    /**
-     * Test mapper with instance component.
-     *
-     * @throws Exception If failed.
-     */
-    public void testMapperInstance() throws Exception {
-        KerberosUserNameMapper mapper = create(INSTANCE, null);
-
-        assertEquals(IgfsUtils.fixUserName(null) + "/" + INSTANCE, mapper.map(null));
-        assertEquals("test" + "/" + INSTANCE, mapper.map("test"));
-    }
-
-    /**
-     * Test mapper with realm.
-     *
-     * @throws Exception If failed.
-     */
-    public void testMapperRealm() throws Exception {
-        KerberosUserNameMapper mapper = create(null, REALM);
-
-        assertEquals(IgfsUtils.fixUserName(null) + "@" + REALM, mapper.map(null));
-        assertEquals("test" + "@" + REALM, mapper.map("test"));
-    }
-
-    /**
-     * Test mapper with instance and realm components.
-     *
-     * @throws Exception If failed.
-     */
-    public void testMapperInstanceAndRealm() throws Exception {
-        KerberosUserNameMapper mapper = create(INSTANCE, REALM);
-
-        assertEquals(IgfsUtils.fixUserName(null) + "/" + INSTANCE + "@" + REALM, mapper.map(null));
-        assertEquals("test" + "/" + INSTANCE + "@" + REALM, mapper.map("test"));
-    }
-
-    /**
-     * Create mapper.
-     *
-     * @param instance Instance.
-     * @param realm Realm.
-     * @return Mapper.
-     */
-    private KerberosUserNameMapper create(@Nullable String instance, @Nullable String realm) {
-        KerberosUserNameMapper mapper = new KerberosUserNameMapper();
-
-        mapper.setInstance(instance);
-        mapper.setRealm(realm);
-
-        mapper.start();
-
-        return mapper;
-    }
-}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/Hadoop1DualAbstractTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/Hadoop1DualAbstractTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/Hadoop1DualAbstractTest.java
deleted file mode 100644
index 2c25a06..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/Hadoop1DualAbstractTest.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import java.io.IOException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.hadoop.fs.CachingHadoopFileSystemFactory;
-import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
-import org.apache.ignite.hadoop.util.ChainedUserNameMapper;
-import org.apache.ignite.hadoop.util.KerberosUserNameMapper;
-import org.apache.ignite.hadoop.util.UserNameMapper;
-import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystem;
-import org.apache.ignite.internal.processors.igfs.IgfsDualAbstractSelfTest;
-import org.apache.ignite.lifecycle.LifecycleAware;
-import org.jetbrains.annotations.Nullable;
-
-import static org.apache.ignite.IgniteFileSystem.IGFS_SCHEME;
-import static org.apache.ignite.igfs.HadoopSecondaryFileSystemConfigurationTest.SECONDARY_CFG_PATH;
-import static org.apache.ignite.igfs.HadoopSecondaryFileSystemConfigurationTest.configuration;
-import static org.apache.ignite.igfs.HadoopSecondaryFileSystemConfigurationTest.mkUri;
-import static org.apache.ignite.igfs.HadoopSecondaryFileSystemConfigurationTest.writeConfiguration;
-import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
-
-/**
- * Abstract test for Hadoop 1.0 file system stack.
- */
-public abstract class Hadoop1DualAbstractTest extends IgfsDualAbstractSelfTest {
-    /** Secondary grid name. */
-    private static final String GRID_NAME = "grid_secondary";
-
-    /** Secondary file system name. */
-    private static final String IGFS_NAME = "igfs_secondary";
-
-    /** Secondary file system REST endpoint port. */
-    private static final int PORT = 11500;
-
-    /** Secondary file system REST endpoint configuration map. */
-    private static final IgfsIpcEndpointConfiguration SECONDARY_REST_CFG = new IgfsIpcEndpointConfiguration() {{
-        setType(IgfsIpcEndpointType.TCP);
-        setPort(PORT);
-    }};
-
-    /** Secondary file system authority. */
-    private static final String SECONDARY_AUTHORITY = IGFS_NAME + ":" + GRID_NAME + "@127.0.0.1:" + PORT;
-
-    /** Secondary Fs configuration full path. */
-    protected String secondaryConfFullPath;
-
-    /** Secondary Fs URI. */
-    protected String secondaryUri;
-
-    /** Constructor. */
-    public Hadoop1DualAbstractTest(IgfsMode mode) {
-        super(mode);
-    }
-
-    /**
-     * Creates the secondary file system stack.
-     * @return Secondary file system.
-     * @throws Exception On failure.
-     */
-    @Override protected IgfsSecondaryFileSystem createSecondaryFileSystemStack() throws Exception {
-        startUnderlying();
-
-        prepareConfiguration();
-
-        KerberosUserNameMapper mapper1 = new KerberosUserNameMapper();
-
-        mapper1.setRealm("TEST.COM");
-
-        TestUserNameMapper mapper2 = new TestUserNameMapper();
-
-        ChainedUserNameMapper mapper = new ChainedUserNameMapper();
-
-        mapper.setMappers(mapper1, mapper2);
-
-        CachingHadoopFileSystemFactory factory = new CachingHadoopFileSystemFactory();
-
-        factory.setUri(secondaryUri);
-        factory.setConfigPaths(secondaryConfFullPath);
-        factory.setUserNameMapper(mapper);
-
-        IgniteHadoopIgfsSecondaryFileSystem second = new IgniteHadoopIgfsSecondaryFileSystem();
-
-        second.setFileSystemFactory(factory);
-
-        igfsSecondary = new HadoopIgfsSecondaryFileSystemTestAdapter(factory);
-
-        return second;
-    }
-
-    /**
-     * Starts underlying Ignite process.
-     * @throws Exception On failure.
-     */
-    protected void startUnderlying() throws Exception {
-        startGridWithIgfs(GRID_NAME, IGFS_NAME, PRIMARY, null, SECONDARY_REST_CFG, secondaryIpFinder);
-    }
-
-    /**
-     * Prepares Fs configuration.
-     * @throws IOException On failure.
-     */
-    protected void prepareConfiguration() throws IOException {
-        Configuration secondaryConf = configuration(IGFS_SCHEME, SECONDARY_AUTHORITY, true, true);
-
-        secondaryConf.setInt("fs.igfs.block.size", 1024);
-
-        secondaryConfFullPath = writeConfiguration(secondaryConf, SECONDARY_CFG_PATH);
-
-        secondaryUri = mkUri(IGFS_SCHEME, SECONDARY_AUTHORITY);
-    }
-
-    /**
-     * Test user name mapper.
-     */
-    private static class TestUserNameMapper implements UserNameMapper, LifecycleAware {
-        /** */
-        private static final long serialVersionUID = 0L;
-
-        /** Started flag. */
-        private boolean started;
-
-        /** {@inheritDoc} */
-        @Nullable @Override public String map(String name) {
-            assert started;
-            assert name != null && name.contains("@");
-
-            return name.substring(0, name.indexOf("@"));
-        }
-
-        /** {@inheritDoc} */
-        @Override public void start() throws IgniteException {
-            started = true;
-        }
-
-        /** {@inheritDoc} */
-        @Override public void stop() throws IgniteException {
-            // No-op.
-        }
-    }
-}
\ No newline at end of file

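For readers wiring this up outside the test harness, the secondary file system stack built in createSecondaryFileSystemStack() above boils down to the following sketch; the endpoint URI mirrors the test constants, while the configuration path is a placeholder:

    import org.apache.ignite.hadoop.fs.CachingHadoopFileSystemFactory;
    import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
    import org.apache.ignite.hadoop.util.ChainedUserNameMapper;
    import org.apache.ignite.hadoop.util.KerberosUserNameMapper;

    KerberosUserNameMapper kerberos = new KerberosUserNameMapper();

    kerberos.setRealm("TEST.COM");

    ChainedUserNameMapper chained = new ChainedUserNameMapper();

    chained.setMappers(kerberos); // Further mappers may be appended, as in the test.

    CachingHadoopFileSystemFactory factory = new CachingHadoopFileSystemFactory();

    factory.setUri("igfs://igfs_secondary:grid_secondary@127.0.0.1:11500/");
    factory.setConfigPaths("/path/to/core-site.xml"); // Placeholder path.
    factory.setUserNameMapper(chained);

    IgniteHadoopIgfsSecondaryFileSystem secondary = new IgniteHadoopIgfsSecondaryFileSystem();

    secondary.setFileSystemFactory(factory);
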
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/Hadoop1OverIgfsDualAsyncTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/Hadoop1OverIgfsDualAsyncTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/Hadoop1OverIgfsDualAsyncTest.java
deleted file mode 100644
index bbf1223..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/Hadoop1OverIgfsDualAsyncTest.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-/**
- * DUAL_ASYNC mode test.
- */
-public class Hadoop1OverIgfsDualAsyncTest extends Hadoop1DualAbstractTest {
-    /**
-     * Constructor.
-     */
-    public Hadoop1OverIgfsDualAsyncTest() {
-        super(IgfsMode.DUAL_ASYNC);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/Hadoop1OverIgfsDualSyncTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/Hadoop1OverIgfsDualSyncTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/Hadoop1OverIgfsDualSyncTest.java
deleted file mode 100644
index c57415c..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/Hadoop1OverIgfsDualSyncTest.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-/**
- * DUAL_SYNC mode test.
- */
-public class Hadoop1OverIgfsDualSyncTest extends Hadoop1DualAbstractTest {
-    /**
-     * Constructor.
-     */
-    public Hadoop1OverIgfsDualSyncTest() {
-        super(IgfsMode.DUAL_SYNC);
-    }
-}
\ No newline at end of file

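The two subclasses above differ only in the IgfsMode they pass to the base test: DUAL_ASYNC propagates writes to the secondary file system asynchronously, DUAL_SYNC synchronously. In user code the mode is selected on FileSystemConfiguration; a minimal sketch (the file system name is illustrative):

    import org.apache.ignite.configuration.FileSystemConfiguration;
    import org.apache.ignite.igfs.IgfsMode;

    FileSystemConfiguration igfsCfg = new FileSystemConfiguration();

    igfsCfg.setName("igfs");                     // Illustrative name.
    igfsCfg.setDefaultMode(IgfsMode.DUAL_ASYNC); // Or IgfsMode.DUAL_SYNC.
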
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopFIleSystemFactorySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopFIleSystemFactorySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopFIleSystemFactorySelfTest.java
deleted file mode 100644
index 5be3a64..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopFIleSystemFactorySelfTest.java
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.cache.CacheWriteSynchronizationMode;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.configuration.FileSystemConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.hadoop.fs.CachingHadoopFileSystemFactory;
-import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
-import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
-import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystem;
-import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
-import org.apache.ignite.internal.processors.igfs.IgfsEx;
-import org.apache.ignite.internal.util.typedef.G;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
-import org.jetbrains.annotations.Nullable;
-import java.io.Externalizable;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.net.URI;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
-import static org.apache.ignite.cache.CacheMode.PARTITIONED;
-import static org.apache.ignite.cache.CacheMode.REPLICATED;
-
-/**
- * Tests for Hadoop file system factory.
- */
-public class HadoopFIleSystemFactorySelfTest extends IgfsCommonAbstractTest {
-    /** Amount of "start" invocations */
-    private static final AtomicInteger START_CNT = new AtomicInteger();
-
-    /** Amount of "stop" invocations */
-    private static final AtomicInteger STOP_CNT = new AtomicInteger();
-
-    /** Path to secondary file system configuration. */
-    private static final String SECONDARY_CFG_PATH = "/work/core-site-HadoopFIleSystemFactorySelfTest.xml";
-
-    /** IGFS path for DUAL mode. */
-    private static final Path PATH_DUAL = new Path("/ignite/sync/test_dir");
-
-    /** IGFS path for PROXY mode. */
-    private static final Path PATH_PROXY = new Path("/ignite/proxy/test_dir");
-
-    /** IGFS path for DUAL mode. */
-    private static final IgfsPath IGFS_PATH_DUAL = new IgfsPath("/ignite/sync/test_dir");
-
-    /** IGFS path for PROXY mode. */
-    private static final IgfsPath IGFS_PATH_PROXY = new IgfsPath("/ignite/proxy/test_dir");
-
-    /** Secondary IGFS. */
-    private IgfsEx secondary;
-
-    /** Primary IGFS. */
-    private IgfsEx primary;
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTest() throws Exception {
-        super.beforeTest();
-
-        START_CNT.set(0);
-        STOP_CNT.set(0);
-
-        secondary = startSecondary();
-        primary = startPrimary();
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTest() throws Exception {
-        super.afterTest();
-
-        secondary = null;
-        primary = null;
-
-        stopAllGrids();
-    }
-
-    /**
-     * Test custom factory.
-     *
-     * @throws Exception If failed.
-     */
-    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
-    public void testCustomFactory() throws Exception {
-        assert START_CNT.get() == 1;
-        assert STOP_CNT.get() == 0;
-
-        // Use IGFS directly.
-        primary.mkdirs(IGFS_PATH_DUAL);
-
-        assert primary.exists(IGFS_PATH_DUAL);
-        assert secondary.exists(IGFS_PATH_DUAL);
-
-        // Create remote instance.
-        FileSystem fs = FileSystem.get(URI.create("igfs://primary:primary@127.0.0.1:10500/"), baseConfiguration());
-
-        // Ensure lifecycle callback was invoked.
-        assert START_CNT.get() == 2;
-        assert STOP_CNT.get() == 0;
-
-        // Check file system operations.
-        assert fs.exists(PATH_DUAL);
-
-        assert fs.delete(PATH_DUAL, true);
-        assert !primary.exists(IGFS_PATH_DUAL);
-        assert !secondary.exists(IGFS_PATH_DUAL);
-        assert !fs.exists(PATH_DUAL);
-
-        assert fs.mkdirs(PATH_DUAL);
-        assert primary.exists(IGFS_PATH_DUAL);
-        assert secondary.exists(IGFS_PATH_DUAL);
-        assert fs.exists(PATH_DUAL);
-
-        assert fs.mkdirs(PATH_PROXY);
-        assert secondary.exists(IGFS_PATH_PROXY);
-        assert fs.exists(PATH_PROXY);
-
-        // Close file system and ensure that associated factory was notified.
-        fs.close();
-
-        assert START_CNT.get() == 2;
-        assert STOP_CNT.get() == 1;
-
-        // Stop primary node and ensure that base factory was notified.
-        G.stop(primary.context().kernalContext().grid().name(), true);
-
-        assert START_CNT.get() == 2;
-        assert STOP_CNT.get() == 2;
-    }
-
-    /**
-     * Start secondary IGFS.
-     *
-     * @return IGFS.
-     * @throws Exception If failed.
-     */
-    private static IgfsEx startSecondary() throws Exception {
-        return start("secondary", 11500, IgfsMode.PRIMARY, null);
-    }
-
-    /**
-     * Start primary IGFS.
-     *
-     * @return IGFS.
-     * @throws Exception If failed.
-     */
-    private static IgfsEx startPrimary() throws Exception {
-        // Prepare configuration.
-        Configuration conf = baseConfiguration();
-
-        conf.set("fs.defaultFS", "igfs://secondary:secondary@127.0.0.1:11500/");
-
-        writeConfigurationToFile(conf);
-
-        // Configure factory.
-        TestFactory factory = new TestFactory();
-
-        factory.setUri("igfs://secondary:secondary@127.0.0.1:11500/");
-        factory.setConfigPaths(SECONDARY_CFG_PATH);
-
-        // Configure file system.
-        IgniteHadoopIgfsSecondaryFileSystem fs = new IgniteHadoopIgfsSecondaryFileSystem();
-
-        fs.setFileSystemFactory(factory);
-
-        // Start.
-        return start("primary", 10500, IgfsMode.DUAL_ASYNC, fs);
-    }
-
-    /**
-     * Start Ignite node with IGFS instance.
-     *
-     * @param name Node and IGFS name.
-     * @param endpointPort Endpoint port.
-     * @param dfltMode Default path mode.
-     * @param secondaryFs Secondary file system.
-     * @return Igfs instance.
-     */
-    private static IgfsEx start(String name, int endpointPort, IgfsMode dfltMode,
-        @Nullable IgfsSecondaryFileSystem secondaryFs) {
-        IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
-
-        endpointCfg.setType(IgfsIpcEndpointType.TCP);
-        endpointCfg.setHost("127.0.0.1");
-        endpointCfg.setPort(endpointPort);
-
-        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
-
-        igfsCfg.setDataCacheName("dataCache");
-        igfsCfg.setMetaCacheName("metaCache");
-        igfsCfg.setName(name);
-        igfsCfg.setDefaultMode(dfltMode);
-        igfsCfg.setIpcEndpointConfiguration(endpointCfg);
-        igfsCfg.setSecondaryFileSystem(secondaryFs);
-        igfsCfg.setInitializeDefaultPathModes(true);
-
-        CacheConfiguration dataCacheCfg = defaultCacheConfiguration();
-
-        dataCacheCfg.setName("dataCache");
-        dataCacheCfg.setCacheMode(PARTITIONED);
-        dataCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        dataCacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(2));
-        dataCacheCfg.setBackups(0);
-        dataCacheCfg.setAtomicityMode(TRANSACTIONAL);
-        dataCacheCfg.setOffHeapMaxMemory(0);
-
-        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
-
-        metaCacheCfg.setName("metaCache");
-        metaCacheCfg.setCacheMode(REPLICATED);
-        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        IgniteConfiguration cfg = new IgniteConfiguration();
-
-        cfg.setGridName(name);
-
-        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
-
-        discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
-
-        cfg.setDiscoverySpi(discoSpi);
-        cfg.setCacheConfiguration(dataCacheCfg, metaCacheCfg);
-        cfg.setFileSystemConfiguration(igfsCfg);
-
-        cfg.setLocalHost("127.0.0.1");
-        cfg.setConnectorConfiguration(null);
-
-        return (IgfsEx)G.start(cfg).fileSystem(name);
-    }
-
-    /**
-     * Create base FileSystem configuration.
-     *
-     * @return Configuration.
-     */
-    private static Configuration baseConfiguration() {
-        Configuration conf = new Configuration();
-
-        conf.set("fs.igfs.impl", IgniteHadoopFileSystem.class.getName());
-
-        return conf;
-    }
-
-    /**
-     * Write configuration to file.
-     *
-     * @param conf Configuration.
-     * @throws Exception If failed.
-     */
-    @SuppressWarnings("ResultOfMethodCallIgnored")
-    private static void writeConfigurationToFile(Configuration conf) throws Exception {
-        final String path = U.getIgniteHome() + SECONDARY_CFG_PATH;
-
-        File file = new File(path);
-
-        file.delete();
-
-        assertFalse(file.exists());
-
-        try (FileOutputStream fos = new FileOutputStream(file)) {
-            conf.writeXml(fos);
-        }
-
-        assertTrue(file.exists());
-    }
-
-    /**
-     * Test factory.
-     */
-    private static class TestFactory extends CachingHadoopFileSystemFactory {
-        /**
-         * {@link Externalizable} support.
-         */
-        public TestFactory() {
-            // No-op.
-        }
-
-        /** {@inheritDoc} */
-        @Override public void start() throws IgniteException {
-            START_CNT.incrementAndGet();
-
-            super.start();
-        }
-
-        /** {@inheritDoc} */
-        @Override public void stop() throws IgniteException {
-            STOP_CNT.incrementAndGet();
-
-            super.stop();
-        }
-    }
-}

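The deleted test above relies on the factory being LifecycleAware: start() runs once per owning component (the secondary file system plus each remote client) and stop() runs on close. A hypothetical subclass hooking those callbacks, analogous to TestFactory (the class name here is illustrative):

    import org.apache.ignite.IgniteException;
    import org.apache.ignite.hadoop.fs.CachingHadoopFileSystemFactory;

    public class AuditingFileSystemFactory extends CachingHadoopFileSystemFactory {
        /** {@inheritDoc} */
        @Override public void start() throws IgniteException {
            // Custom initialization goes here, then delegate to the caching implementation.
            super.start();
        }

        /** {@inheritDoc} */
        @Override public void stop() throws IgniteException {
            // Custom cleanup; invoked when the owning file system or node stops.
            super.stop();
        }
    }
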

[49/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/v2/IgniteHadoopFileSystem.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/v2/IgniteHadoopFileSystem.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/v2/IgniteHadoopFileSystem.java
new file mode 100644
index 0000000..bd8ed2d
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/v2/IgniteHadoopFileSystem.java
@@ -0,0 +1,1076 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.fs.v2;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.AbstractFileSystem;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.FsStatus;
+import org.apache.hadoop.fs.InvalidPathException;
+import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.Progressable;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.configuration.FileSystemConfiguration;
+import org.apache.ignite.hadoop.fs.HadoopFileSystemFactory;
+import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
+import org.apache.ignite.igfs.IgfsBlockLocation;
+import org.apache.ignite.igfs.IgfsFile;
+import org.apache.ignite.igfs.IgfsMode;
+import org.apache.ignite.igfs.IgfsPath;
+import org.apache.ignite.internal.igfs.common.IgfsLogger;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsEndpoint;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsInputStream;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsOutputStream;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsProxyInputStream;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsProxyOutputStream;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsStreamDelegate;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsWrapper;
+import org.apache.ignite.internal.processors.igfs.IgfsHandshakeResponse;
+import org.apache.ignite.internal.processors.igfs.IgfsModeResolver;
+import org.apache.ignite.internal.processors.igfs.IgfsPaths;
+import org.apache.ignite.internal.processors.igfs.IgfsStatus;
+import org.apache.ignite.internal.processors.igfs.IgfsUtils;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.T2;
+import org.apache.ignite.internal.util.typedef.internal.A;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lifecycle.LifecycleAware;
+import org.jetbrains.annotations.Nullable;
+
+import java.io.BufferedOutputStream;
+import java.io.Closeable;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.apache.ignite.configuration.FileSystemConfiguration.DFLT_IGFS_LOG_BATCH_SIZE;
+import static org.apache.ignite.configuration.FileSystemConfiguration.DFLT_IGFS_LOG_DIR;
+import static org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.getFsHadoopUser;
+import static org.apache.ignite.igfs.IgfsMode.PROXY;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_COLOCATED_WRITES;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_LOG_BATCH_SIZE;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_LOG_DIR;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_LOG_ENABLED;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_PREFER_LOCAL_WRITES;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH;
+import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.parameter;
+import static org.apache.ignite.internal.processors.igfs.IgfsEx.IGFS_SCHEME;
+
+/**
+ * {@code IGFS} Hadoop 2.x file system driver over the file system API. To use
+ * {@code IGFS} as a Hadoop file system, you should configure this class
+ * in Hadoop's {@code core-site.xml} as follows:
+ * <pre name="code" class="xml">
+ *  &lt;property&gt;
+ *      &lt;name&gt;fs.default.name&lt;/name&gt;
+ *      &lt;value&gt;igfs://ipc&lt;/value&gt;
+ *  &lt;/property&gt;
+ *
+ *  &lt;property&gt;
+ *      &lt;name&gt;fs.igfs.impl&lt;/name&gt;
+ *      &lt;value&gt;org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem&lt;/value&gt;
+ *  &lt;/property&gt;
+ * </pre>
+ * You should also add the Ignite JAR and all libraries to the Hadoop classpath. To
+ * do this, add the following lines to the {@code conf/hadoop-env.sh} script in the Hadoop
+ * distribution:
+ * <pre name="code" class="bash">
+ * export IGNITE_HOME=/path/to/Ignite/distribution
+ * export HADOOP_CLASSPATH=$IGNITE_HOME/ignite*.jar
+ *
+ * for f in $IGNITE_HOME/libs/*.jar; do
+ *  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f;
+ * done
+ * </pre>
+ * <h1 class="header">Data vs Clients Nodes</h1>
+ * Hadoop needs to use its FileSystem remotely from client nodes as well as directly on
+ * data nodes. Client nodes are responsible for basic file system operations as well as
+ * accessing data nodes remotely. Usually, client nodes are started together
+ * with {@code job-submitter} or {@code job-scheduler} processes, while data nodes are usually
+ * started together with Hadoop {@code task-tracker} processes.
+ * <p>
+ * For sample client and data node configuration refer to {@code config/hadoop/default-config-client.xml}
+ * and {@code config/hadoop/default-config.xml} configuration files in Ignite installation.
+ */
+public class IgniteHadoopFileSystem extends AbstractFileSystem implements Closeable {
+    /** Logger. */
+    private static final Log LOG = LogFactory.getLog(IgniteHadoopFileSystem.class);
+
+    /** Ensures that close routine is invoked at most once. */
+    private final AtomicBoolean closeGuard = new AtomicBoolean();
+
+    /** Grid remote client. */
+    private HadoopIgfsWrapper rmtClient;
+
+    /** The name of the user this file system was created on behalf of. */
+    private final String user;
+
+    /** Working directory. */
+    private IgfsPath workingDir;
+
+    /** URI. */
+    private final URI uri;
+
+    /** Authority. */
+    private String uriAuthority;
+
+    /** Client logger. */
+    private IgfsLogger clientLog;
+
+    /** Server block size. */
+    private long grpBlockSize;
+
+    /** Default replication factor. */
+    private short dfltReplication;
+
+    /** Secondary file system URI. */
+    private URI secondaryUri;
+
+    /** Mode resolver. */
+    private IgfsModeResolver modeRslvr;
+
+    /** The secondary file system factory. */
+    private HadoopFileSystemFactory factory;
+
+    /** Whether a custom "sequential reads before prefetch" value is provided. */
+    private boolean seqReadsBeforePrefetchOverride;
+
+    /** Custom-provided sequential reads before prefetch. */
+    private int seqReadsBeforePrefetch;
+
+    /** Flag that controls whether file writes should be colocated on data node. */
+    private boolean colocateFileWrites;
+
+    /** Prefer local writes. */
+    private boolean preferLocFileWrites;
+
+    /**
+     * @param name URI for file system.
+     * @param cfg Configuration.
+     * @throws URISyntaxException If name has invalid syntax.
+     * @throws IOException If initialization failed.
+     */
+    public IgniteHadoopFileSystem(URI name, Configuration cfg) throws URISyntaxException, IOException {
+        super(HadoopIgfsEndpoint.normalize(name), IGFS_SCHEME, false, -1);
+
+        uri = name;
+
+        user = getFsHadoopUser();
+
+        try {
+            initialize(name, cfg);
+        }
+        catch (IOException e) {
+            // Close client if exception occurred.
+            if (rmtClient != null)
+                rmtClient.close(false);
+
+            throw e;
+        }
+
+        workingDir = new IgfsPath("/user/" + user);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void checkPath(Path path) {
+        URI uri = path.toUri();
+
+        if (uri.isAbsolute()) {
+            if (!F.eq(uri.getScheme(), IGFS_SCHEME))
+                throw new InvalidPathException("Wrong path scheme [expected=" + IGFS_SCHEME + ", actual=" +
+                    uri.getAuthority() + ']');
+
+            if (!F.eq(uri.getAuthority(), uriAuthority))
+                throw new InvalidPathException("Wrong path authority [expected=" + uriAuthority + ", actual=" +
+                    uri.getAuthority() + ']');
+        }
+    }
+
+    /**
+     * Public setter that can be used by direct users of FS or Visor.
+     *
+     * @param colocateFileWrites Whether all ongoing file writes should be colocated.
+     */
+    @SuppressWarnings("UnusedDeclaration")
+    public void colocateFileWrites(boolean colocateFileWrites) {
+        this.colocateFileWrites = colocateFileWrites;
+    }
+
+    /**
+     * Enter busy state.
+     *
+     * @throws IOException If file system is stopped.
+     */
+    private void enterBusy() throws IOException {
+        if (closeGuard.get())
+            throw new IOException("File system is stopped.");
+    }
+
+    /**
+     * Leave busy state.
+     */
+    private void leaveBusy() {
+        // No-op.
+    }
+
+    /**
+     * @param name URI passed to constructor.
+     * @param cfg Configuration passed to constructor.
+     * @throws IOException If initialization failed.
+     */
+    @SuppressWarnings("ConstantConditions")
+    private void initialize(URI name, Configuration cfg) throws IOException {
+        enterBusy();
+
+        try {
+            if (rmtClient != null)
+                throw new IOException("File system is already initialized: " + rmtClient);
+
+            A.notNull(name, "name");
+            A.notNull(cfg, "cfg");
+
+            if (!IGFS_SCHEME.equals(name.getScheme()))
+                throw new IOException("Illegal file system URI [expected=" + IGFS_SCHEME +
+                    "://[name]/[optional_path], actual=" + name + ']');
+
+            uriAuthority = name.getAuthority();
+
+            // Override sequential reads before prefetch if needed.
+            seqReadsBeforePrefetch = parameter(cfg, PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, uriAuthority, 0);
+
+            if (seqReadsBeforePrefetch > 0)
+                seqReadsBeforePrefetchOverride = true;
+
+            // In Ignite the replication factor is controlled by data cache affinity.
+            // We use the replication factor to force the whole file to be stored on the local node.
+            dfltReplication = (short)cfg.getInt("dfs.replication", 3);
+
+            // Get file colocation control flag.
+            colocateFileWrites = parameter(cfg, PARAM_IGFS_COLOCATED_WRITES, uriAuthority, false);
+            preferLocFileWrites = cfg.getBoolean(PARAM_IGFS_PREFER_LOCAL_WRITES, false);
+
+            // Get log directory.
+            String logDirCfg = parameter(cfg, PARAM_IGFS_LOG_DIR, uriAuthority, DFLT_IGFS_LOG_DIR);
+
+            File logDirFile = U.resolveIgnitePath(logDirCfg);
+
+            String logDir = logDirFile != null ? logDirFile.getAbsolutePath() : null;
+
+            rmtClient = new HadoopIgfsWrapper(uriAuthority, logDir, cfg, LOG, user);
+
+            // Handshake.
+            IgfsHandshakeResponse handshake = rmtClient.handshake(logDir);
+
+            grpBlockSize = handshake.blockSize();
+
+            IgfsPaths paths = handshake.secondaryPaths();
+
+            Boolean logEnabled = parameter(cfg, PARAM_IGFS_LOG_ENABLED, uriAuthority, false);
+
+            if (handshake.sampling() != null ? handshake.sampling() : logEnabled) {
+                // Initiate client logger.
+                if (logDir == null)
+                    throw new IOException("Failed to resolve log directory: " + logDirCfg);
+
+                Integer batchSize = parameter(cfg, PARAM_IGFS_LOG_BATCH_SIZE, uriAuthority, DFLT_IGFS_LOG_BATCH_SIZE);
+
+                clientLog = IgfsLogger.logger(uriAuthority, handshake.igfsName(), logDir, batchSize);
+            }
+            else
+                clientLog = IgfsLogger.disabledLogger();
+
+            try {
+                modeRslvr = new IgfsModeResolver(paths.defaultMode(), paths.pathModes());
+            }
+            catch (IgniteCheckedException ice) {
+                throw new IOException(ice);
+            }
+
+            boolean initSecondary = paths.defaultMode() == PROXY;
+
+            if (!initSecondary && paths.pathModes() != null) {
+                for (T2<IgfsPath, IgfsMode> pathMode : paths.pathModes()) {
+                    IgfsMode mode = pathMode.getValue();
+
+                    if (mode == PROXY) {
+                        initSecondary = true;
+
+                        break;
+                    }
+                }
+            }
+
+            if (initSecondary) {
+                try {
+                    factory = (HadoopFileSystemFactory) paths.getPayload(getClass().getClassLoader());
+                }
+                catch (IgniteCheckedException e) {
+                    throw new IOException("Failed to get secondary file system factory.", e);
+                }
+
+                if (factory == null)
+                    throw new IOException("Failed to get secondary file system factory (did you set " +
+                        IgniteHadoopIgfsSecondaryFileSystem.class.getName() + " as \"secondaryFileSystem\" in " +
+                        FileSystemConfiguration.class.getName() + "?)");
+
+                assert factory != null;
+
+                if (factory instanceof LifecycleAware)
+                    ((LifecycleAware) factory).start();
+
+                try {
+                    FileSystem secFs = factory.get(user);
+
+                    secondaryUri = secFs.getUri();
+
+                    A.ensure(secondaryUri != null, "Secondary file system URI should not be null.");
+                }
+                catch (IOException e) {
+                    throw new IOException("Failed to connect to the secondary file system: " + secondaryUri, e);
+                }
+            }
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void close() throws IOException {
+        if (closeGuard.compareAndSet(false, true)) {
+            if (rmtClient == null)
+                return;
+
+            rmtClient.close(false);
+
+            if (clientLog.isLogEnabled())
+                clientLog.close();
+
+            if (factory instanceof LifecycleAware)
+                ((LifecycleAware) factory).stop();
+
+            // Reset initialized resources.
+            rmtClient = null;
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public URI getUri() {
+        return uri;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int getUriDefaultPort() {
+        return -1;
+    }
+
+    /** {@inheritDoc} */
+    @Override public FsServerDefaults getServerDefaults() throws IOException {
+        return new FsServerDefaults(grpBlockSize, (int)grpBlockSize, (int)grpBlockSize, dfltReplication, 64 * 1024,
+            false, 0, DataChecksum.Type.NULL);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean setReplication(Path f, short replication) throws IOException {
+        return mode(f) == PROXY && secondaryFileSystem().setReplication(f, replication);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setTimes(Path f, long mtime, long atime) throws IOException {
+        if (mode(f) == PROXY)
+            secondaryFileSystem().setTimes(f, mtime, atime);
+        else {
+            if (mtime == -1 && atime == -1)
+                return;
+
+            rmtClient.setTimes(convert(f), atime, mtime);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public FsStatus getFsStatus() throws IOException {
+        IgfsStatus status = rmtClient.fsStatus();
+
+        return new FsStatus(status.spaceTotal(), status.spaceUsed(), status.spaceTotal() - status.spaceUsed());
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setPermission(Path p, FsPermission perm) throws IOException {
+        enterBusy();
+
+        try {
+            A.notNull(p, "p");
+
+            if (mode(p) == PROXY)
+                secondaryFileSystem().setPermission(toSecondary(p), perm);
+            else {
+                if (rmtClient.update(convert(p), permission(perm)) == null)
+                    throw new IOException("Failed to set file permission (file not found?)" +
+                        " [path=" + p + ", perm=" + perm + ']');
+            }
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setOwner(Path p, String usr, String grp) throws IOException {
+        A.notNull(p, "p");
+        A.notNull(usr, "username");
+        A.notNull(grp, "grpName");
+
+        enterBusy();
+
+        try {
+            if (mode(p) == PROXY)
+                secondaryFileSystem().setOwner(toSecondary(p), usr, grp);
+            else if (rmtClient.update(convert(p), F.asMap(IgfsUtils.PROP_USER_NAME, usr,
+                IgfsUtils.PROP_GROUP_NAME, grp)) == null) {
+                throw new IOException("Failed to set file permission (file not found?)" +
+                    " [path=" + p + ", username=" + usr + ", grpName=" + grp + ']');
+            }
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public FSDataInputStream open(Path f, int bufSize) throws IOException {
+        A.notNull(f, "f");
+
+        enterBusy();
+
+        try {
+            IgfsPath path = convert(f);
+            IgfsMode mode = modeRslvr.resolveMode(path);
+
+            if (mode == PROXY) {
+                FSDataInputStream is = secondaryFileSystem().open(toSecondary(f), bufSize);
+
+                if (clientLog.isLogEnabled()) {
+                    // At this point we do not know the file size, so we perform an additional request to the remote FS to get it.
+                    FileStatus status = secondaryFileSystem().getFileStatus(toSecondary(f));
+
+                    long size = status != null ? status.getLen() : -1;
+
+                    long logId = IgfsLogger.nextId();
+
+                    clientLog.logOpen(logId, path, PROXY, bufSize, size);
+
+                    return new FSDataInputStream(new HadoopIgfsProxyInputStream(is, clientLog, logId));
+                }
+                else
+                    return is;
+            }
+            else {
+                HadoopIgfsStreamDelegate stream = seqReadsBeforePrefetchOverride ?
+                    rmtClient.open(path, seqReadsBeforePrefetch) : rmtClient.open(path);
+
+                long logId = -1;
+
+                if (clientLog.isLogEnabled()) {
+                    logId = IgfsLogger.nextId();
+
+                    clientLog.logOpen(logId, path, mode, bufSize, stream.length());
+                }
+
+                if (LOG.isDebugEnabled())
+                    LOG.debug("Opening input stream [thread=" + Thread.currentThread().getName() + ", path=" + path +
+                        ", bufSize=" + bufSize + ']');
+
+                HadoopIgfsInputStream igfsIn = new HadoopIgfsInputStream(stream, stream.length(),
+                    bufSize, LOG, clientLog, logId);
+
+                if (LOG.isDebugEnabled())
+                    LOG.debug("Opened input stream [path=" + path + ", delegate=" + stream + ']');
+
+                return new FSDataInputStream(igfsIn);
+            }
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("deprecation")
+    @Override public FSDataOutputStream createInternal(
+        Path f,
+        EnumSet<CreateFlag> flag,
+        FsPermission perm,
+        int bufSize,
+        short replication,
+        long blockSize,
+        Progressable progress,
+        Options.ChecksumOpt checksumOpt,
+        boolean createParent
+    ) throws IOException {
+        A.notNull(f, "f");
+
+        enterBusy();
+
+        boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
+        boolean append = flag.contains(CreateFlag.APPEND);
+        boolean create = flag.contains(CreateFlag.CREATE);
+
+        OutputStream out = null;
+
+        try {
+            IgfsPath path = convert(f);
+            IgfsMode mode = modeRslvr.resolveMode(path);
+
+            if (LOG.isDebugEnabled())
+                LOG.debug("Opening output stream in create [thread=" + Thread.currentThread().getName() + "path=" +
+                    path + ", overwrite=" + overwrite + ", bufSize=" + bufSize + ']');
+
+            if (mode == PROXY) {
+                FSDataOutputStream os = secondaryFileSystem().create(toSecondary(f), perm, flag, bufSize,
+                    replication, blockSize, progress);
+
+                if (clientLog.isLogEnabled()) {
+                    long logId = IgfsLogger.nextId();
+
+                    if (append)
+                        clientLog.logAppend(logId, path, PROXY, bufSize); // Don't have stream ID.
+                    else
+                        clientLog.logCreate(logId, path, PROXY, overwrite, bufSize, replication, blockSize);
+
+                    return new FSDataOutputStream(new HadoopIgfsProxyOutputStream(os, clientLog, logId));
+                }
+                else
+                    return os;
+            }
+            else {
+                Map<String, String> permMap = F.asMap(IgfsUtils.PROP_PERMISSION, toString(perm),
+                    IgfsUtils.PROP_PREFER_LOCAL_WRITES, Boolean.toString(preferLocFileWrites));
+
+                // Create stream and close it in the 'finally' section if any sequential operation failed.
+                HadoopIgfsStreamDelegate stream;
+
+                long logId = -1;
+
+                if (append) {
+                    stream = rmtClient.append(path, create, permMap);
+
+                    if (clientLog.isLogEnabled()) {
+                        logId = IgfsLogger.nextId();
+
+                        clientLog.logAppend(logId, path, mode, bufSize);
+                    }
+
+                    if (LOG.isDebugEnabled())
+                        LOG.debug("Opened output stream in append [path=" + path + ", delegate=" + stream + ']');
+                }
+                else {
+                    stream = rmtClient.create(path, overwrite, colocateFileWrites, replication, blockSize,
+                        permMap);
+
+                    if (clientLog.isLogEnabled()) {
+                        logId = IgfsLogger.nextId();
+
+                        clientLog.logCreate(logId, path, mode, overwrite, bufSize, replication, blockSize);
+                    }
+
+                    if (LOG.isDebugEnabled())
+                        LOG.debug("Opened output stream in create [path=" + path + ", delegate=" + stream + ']');
+                }
+
+                assert stream != null;
+
+                HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG,
+                    clientLog, logId);
+
+                bufSize = Math.max(64 * 1024, bufSize);
+
+                out = new BufferedOutputStream(igfsOut, bufSize);
+
+                FSDataOutputStream res = new FSDataOutputStream(out, null, 0);
+
+                // Mark stream created successfully.
+                out = null;
+
+                return res;
+            }
+        }
+        finally {
+            // Close if failed during stream creation.
+            if (out != null)
+                U.closeQuiet(out);
+
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean supportsSymlinks() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void renameInternal(Path src, Path dst) throws IOException {
+        A.notNull(src, "src");
+        A.notNull(dst, "dst");
+
+        enterBusy();
+
+        try {
+            IgfsPath srcPath = convert(src);
+            IgfsPath dstPath = convert(dst);
+
+            IgfsMode srcMode = modeRslvr.resolveMode(srcPath);
+
+            if (clientLog.isLogEnabled())
+                clientLog.logRename(srcPath, srcMode, dstPath);
+
+            if (srcMode == PROXY)
+                secondaryFileSystem().rename(toSecondary(src), toSecondary(dst));
+            else
+                rmtClient.rename(srcPath, dstPath);
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean delete(Path f, boolean recursive) throws IOException {
+        A.notNull(f, "f");
+
+        enterBusy();
+
+        try {
+            IgfsPath path = convert(f);
+
+            IgfsMode mode = modeRslvr.resolveMode(path);
+
+            if (mode == PROXY) {
+                if (clientLog.isLogEnabled())
+                    clientLog.logDelete(path, PROXY, recursive);
+
+                return secondaryFileSystem().delete(toSecondary(f), recursive);
+            }
+
+            boolean res = rmtClient.delete(path, recursive);
+
+            if (clientLog.isLogEnabled())
+                clientLog.logDelete(path, mode, recursive);
+
+            return res;
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setVerifyChecksum(boolean verifyChecksum) throws IOException {
+        // Checksum has effect for secondary FS only.
+        if (factory != null)
+            secondaryFileSystem().setVerifyChecksum(verifyChecksum);
+    }
+
+    /** {@inheritDoc} */
+    @Override public FileChecksum getFileChecksum(Path f) throws IOException {
+        if (mode(f) == PROXY)
+            return secondaryFileSystem().getFileChecksum(f);
+
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public FileStatus[] listStatus(Path f) throws IOException {
+        A.notNull(f, "f");
+
+        enterBusy();
+
+        try {
+            IgfsPath path = convert(f);
+            IgfsMode mode = modeRslvr.resolveMode(path);
+
+            if (mode == PROXY) {
+                FileStatus[] arr = secondaryFileSystem().listStatus(toSecondary(f));
+
+                if (arr == null)
+                    throw new FileNotFoundException("File " + f + " does not exist.");
+
+                for (int i = 0; i < arr.length; i++)
+                    arr[i] = toPrimary(arr[i]);
+
+                if (clientLog.isLogEnabled()) {
+                    String[] fileArr = new String[arr.length];
+
+                    for (int i = 0; i < arr.length; i++)
+                        fileArr[i] = arr[i].getPath().toString();
+
+                    clientLog.logListDirectory(path, PROXY, fileArr);
+                }
+
+                return arr;
+            }
+            else {
+                Collection<IgfsFile> list = rmtClient.listFiles(path);
+
+                if (list == null)
+                    throw new FileNotFoundException("File " + f + " does not exist.");
+
+                List<IgfsFile> files = new ArrayList<>(list);
+
+                FileStatus[] arr = new FileStatus[files.size()];
+
+                for (int i = 0; i < arr.length; i++)
+                    arr[i] = convert(files.get(i));
+
+                if (clientLog.isLogEnabled()) {
+                    String[] fileArr = new String[arr.length];
+
+                    for (int i = 0; i < arr.length; i++)
+                        fileArr[i] = arr[i].getPath().toString();
+
+                    clientLog.logListDirectory(path, mode, fileArr);
+                }
+
+                return arr;
+            }
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void mkdir(Path f, FsPermission perm, boolean createParent) throws IOException {
+        A.notNull(f, "f");
+
+        enterBusy();
+
+        try {
+            IgfsPath path = convert(f);
+            IgfsMode mode = modeRslvr.resolveMode(path);
+
+            if (mode == PROXY) {
+                if (clientLog.isLogEnabled())
+                    clientLog.logMakeDirectory(path, PROXY);
+
+                secondaryFileSystem().mkdirs(toSecondary(f), perm);
+            }
+            else {
+                rmtClient.mkdirs(path, permission(perm));
+
+                if (clientLog.isLogEnabled())
+                    clientLog.logMakeDirectory(path, mode);
+            }
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public FileStatus getFileStatus(Path f) throws IOException {
+        A.notNull(f, "f");
+
+        enterBusy();
+
+        try {
+            if (mode(f) == PROXY)
+                return toPrimary(secondaryFileSystem().getFileStatus(toSecondary(f)));
+            else {
+                IgfsFile info = rmtClient.info(convert(f));
+
+                if (info == null)
+                    throw new FileNotFoundException("File not found: " + f);
+
+                return convert(info);
+            }
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public BlockLocation[] getFileBlockLocations(Path path, long start, long len) throws IOException {
+        A.notNull(path, "path");
+
+        IgfsPath igfsPath = convert(path);
+
+        enterBusy();
+
+        try {
+            if (modeRslvr.resolveMode(igfsPath) == PROXY)
+                return secondaryFileSystem().getFileBlockLocations(path, start, len);
+            else {
+                long now = System.currentTimeMillis();
+
+                List<IgfsBlockLocation> affinity = new ArrayList<>(
+                    rmtClient.affinity(igfsPath, start, len));
+
+                BlockLocation[] arr = new BlockLocation[affinity.size()];
+
+                for (int i = 0; i < arr.length; i++)
+                    arr[i] = convert(affinity.get(i));
+
+                if (LOG.isDebugEnabled())
+                    LOG.debug("Fetched file locations [path=" + path + ", fetchTime=" +
+                        (System.currentTimeMillis() - now) + ", locations=" + Arrays.asList(arr) + ']');
+
+                return arr;
+            }
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /**
+     * Resolve path mode.
+     *
+     * @param path Hadoop path.
+     * @return Path mode.
+     */
+    public IgfsMode mode(Path path) {
+        return modeRslvr.resolveMode(convert(path));
+    }
+
+    /**
+     * Convert the given path to path acceptable by the primary file system.
+     *
+     * @param path Path.
+     * @return Primary file system path.
+     */
+    private Path toPrimary(Path path) {
+        return convertPath(path, getUri());
+    }
+
+    /**
+     * Convert the given path to path acceptable by the secondary file system.
+     *
+     * @param path Path.
+     * @return Secondary file system path.
+     */
+    private Path toSecondary(Path path) {
+        assert factory != null;
+        assert secondaryUri != null;
+
+        return convertPath(path, secondaryUri);
+    }
+
+    /**
+     * Convert path using the given new URI.
+     *
+     * @param path Old path.
+     * @param newUri New URI.
+     * @return New path.
+     */
+    private Path convertPath(Path path, URI newUri) {
+        assert newUri != null;
+
+        if (path != null) {
+            URI pathUri = path.toUri();
+
+            try {
+                return new Path(new URI(pathUri.getScheme() != null ? newUri.getScheme() : null,
+                    pathUri.getAuthority() != null ? newUri.getAuthority() : null, pathUri.getPath(), null, null));
+            }
+            catch (URISyntaxException e) {
+                throw new IgniteException("Failed to construct secondary file system path from the primary file " +
+                    "system path: " + path, e);
+            }
+        }
+        else
+            return null;
+    }
+
+    /**
+     * Convert a file status obtained from the secondary file system to a status of the primary file system.
+     *
+     * @param status Secondary file system status.
+     * @return Primary file system status.
+     */
+    private FileStatus toPrimary(FileStatus status) {
+        return status != null ? new FileStatus(status.getLen(), status.isDirectory(), status.getReplication(),
+            status.getBlockSize(), status.getModificationTime(), status.getAccessTime(), status.getPermission(),
+            status.getOwner(), status.getGroup(), toPrimary(status.getPath())) : null;
+    }
+
+    /**
+     * Convert IGFS path into Hadoop path.
+     *
+     * @param path IGFS path.
+     * @return Hadoop path.
+     */
+    private Path convert(IgfsPath path) {
+        return new Path(IGFS_SCHEME, uriAuthority, path.toString());
+    }
+
+    /**
+     * Convert Hadoop path into IGFS path.
+     *
+     * @param path Hadoop path.
+     * @return IGFS path.
+     */
+    @Nullable private IgfsPath convert(Path path) {
+        if (path == null)
+            return null;
+
+        return path.isAbsolute() ? new IgfsPath(path.toUri().getPath()) :
+            new IgfsPath(workingDir, path.toUri().getPath());
+    }
+
+    /**
+     * Convert IGFS affinity block location into Hadoop affinity block location.
+     *
+     * @param block IGFS affinity block location.
+     * @return Hadoop affinity block location.
+     */
+    private BlockLocation convert(IgfsBlockLocation block) {
+        Collection<String> names = block.names();
+        Collection<String> hosts = block.hosts();
+
+        return new BlockLocation(
+            names.toArray(new String[names.size()]) /* hostname:portNumber of data nodes */,
+            hosts.toArray(new String[hosts.size()]) /* hostnames of data nodes */,
+            block.start(), block.length()
+        ) {
+            @Override public String toString() {
+                try {
+                    return "BlockLocation [offset=" + getOffset() + ", length=" + getLength() +
+                        ", hosts=" + Arrays.asList(getHosts()) + ", names=" + Arrays.asList(getNames()) + ']';
+                }
+                catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
+            }
+        };
+    }
+
+    /**
+     * Convert IGFS file information into Hadoop file status.
+     *
+     * @param file IGFS file information.
+     * @return Hadoop file status.
+     */
+    private FileStatus convert(IgfsFile file) {
+        return new FileStatus(
+            file.length(),
+            file.isDirectory(),
+            dfltReplication,
+            file.groupBlockSize(),
+            file.modificationTime(),
+            file.accessTime(),
+            permission(file),
+            file.property(IgfsUtils.PROP_USER_NAME, user),
+            file.property(IgfsUtils.PROP_GROUP_NAME, "users"),
+            convert(file.path())) {
+            @Override public String toString() {
+                return "FileStatus [path=" + getPath() + ", isDir=" + isDirectory() + ", len=" + getLen() + "]";
+            }
+        };
+    }
+
+    /**
+     * Convert Hadoop permission into IGFS file attribute.
+     *
+     * @param perm Hadoop permission.
+     * @return IGFS attributes.
+     */
+    private Map<String, String> permission(FsPermission perm) {
+        if (perm == null)
+            perm = FsPermission.getDefault();
+
+        return F.asMap(IgfsUtils.PROP_PERMISSION, toString(perm));
+    }
+
+    /**
+     * @param perm Permission.
+     * @return String.
+     */
+    private static String toString(FsPermission perm) {
+        return String.format("%04o", perm.toShort());
+    }
+
+    /**
+     * Convert IGFS file attributes into Hadoop permission.
+     *
+     * @param file File info.
+     * @return Hadoop permission.
+     */
+    private FsPermission permission(IgfsFile file) {
+        String perm = file.property(IgfsUtils.PROP_PERMISSION, null);
+
+        if (perm == null)
+            return FsPermission.getDefault();
+
+        try {
+            return new FsPermission((short)Integer.parseInt(perm, 8));
+        }
+        catch (NumberFormatException ignore) {
+            return FsPermission.getDefault();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(IgniteHadoopFileSystem.class, this);
+    }
+
+    /**
+     * Returns the user name this file system is created on behalf of.
+     *
+     * @return The user name.
+     */
+    public String user() {
+        return user;
+    }
+
+    /**
+     * Gets cached or creates a {@link FileSystem}.
+     *
+     * @return The secondary file system.
+     */
+    private FileSystem secondaryFileSystem() throws IOException {
+        assert factory != null;
+
+        return factory.get(user);
+    }
+}
\ No newline at end of file
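
For reference, a standalone sketch of the octal permission round-trip implemented
by the two helpers above; the class name is illustrative and only hadoop-common
is assumed on the classpath:

import org.apache.hadoop.fs.permission.FsPermission;

public class PermissionRoundTripExample {
    public static void main(String[] args) {
        FsPermission perm = new FsPermission((short)0644);

        // Serialize to the IGFS property format, e.g. "0644".
        String prop = String.format("%04o", perm.toShort());

        FsPermission parsed;

        // Parse back, falling back to the default on malformed input.
        try {
            parsed = new FsPermission((short)Integer.parseInt(prop, 8));
        }
        catch (NumberFormatException ignore) {
            parsed = FsPermission.getDefault();
        }

        System.out.println(prop + " -> " + parsed); // Prints "0644 -> rw-r--r--".
    }
}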

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/v2/package-info.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/v2/package-info.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/v2/package-info.java
new file mode 100644
index 0000000..d8e70d1
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/v2/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <!-- Package description. -->
+ * Contains Ignite Hadoop 2.x <code>FileSystem</code> implementation.
+ */
+package org.apache.ignite.hadoop.fs.v2;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopClientProtocolProvider.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopClientProtocolProvider.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopClientProtocolProvider.java
new file mode 100644
index 0000000..583af35
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopClientProtocolProvider.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.mapreduce;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Collections;
+import java.util.concurrent.ConcurrentHashMap;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
+import org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.client.GridClient;
+import org.apache.ignite.internal.client.GridClientConfiguration;
+import org.apache.ignite.internal.client.GridClientException;
+import org.apache.ignite.internal.client.GridClientFactory;
+import org.apache.ignite.internal.client.marshaller.jdk.GridClientJdkMarshaller;
+import org.apache.ignite.internal.processors.hadoop.proto.HadoopClientProtocol;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.apache.ignite.internal.util.typedef.F;
+
+import static org.apache.ignite.internal.client.GridClientProtocol.TCP;
+
+/**
+ * Ignite Hadoop client protocol provider.
+ */
+public class IgniteHadoopClientProtocolProvider extends ClientProtocolProvider {
+    /** Framework name used in configuration. */
+    public static final String FRAMEWORK_NAME = "ignite";
+
+    /** Clients. */
+    private static final ConcurrentHashMap<String, IgniteInternalFuture<GridClient>> cliMap = new ConcurrentHashMap<>();
+
+    /** {@inheritDoc} */
+    @Override public ClientProtocol create(Configuration conf) throws IOException {
+        if (FRAMEWORK_NAME.equals(conf.get(MRConfig.FRAMEWORK_NAME))) {
+            String addr = conf.get(MRConfig.MASTER_ADDRESS);
+
+            if (F.isEmpty(addr))
+                throw new IOException("Failed to create client protocol because server address is not specified (is " +
+                    MRConfig.MASTER_ADDRESS + " property set?).");
+
+            if (F.eq(addr, "local"))
+                throw new IOException("Local execution mode is not supported, please point " +
+                    MRConfig.MASTER_ADDRESS + " to real Ignite node.");
+
+            return createProtocol(addr, conf);
+        }
+
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public ClientProtocol create(InetSocketAddress addr, Configuration conf) throws IOException {
+        if (FRAMEWORK_NAME.equals(conf.get(MRConfig.FRAMEWORK_NAME)))
+            return createProtocol(addr.getHostString() + ":" + addr.getPort(), conf);
+
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void close(ClientProtocol cliProto) throws IOException {
+        // No-op.
+    }
+
+    /**
+     * Internal protocol creation routine.
+     *
+     * @param addr Address.
+     * @param conf Configuration.
+     * @return Client protocol.
+     * @throws IOException If failed.
+     */
+    private static ClientProtocol createProtocol(String addr, Configuration conf) throws IOException {
+        return new HadoopClientProtocol(conf, client(addr));
+    }
+
+    /**
+     * Create client.
+     *
+     * @param addr Endpoint address.
+     * @return Client.
+     * @throws IOException If failed.
+     */
+    private static GridClient client(String addr) throws IOException {
+        try {
+            IgniteInternalFuture<GridClient> fut = cliMap.get(addr);
+
+            if (fut == null) {
+                GridFutureAdapter<GridClient> fut0 = new GridFutureAdapter<>();
+
+                IgniteInternalFuture<GridClient> oldFut = cliMap.putIfAbsent(addr, fut0);
+
+                if (oldFut != null)
+                    return oldFut.get();
+                else {
+                    GridClientConfiguration cliCfg = new GridClientConfiguration();
+
+                    cliCfg.setProtocol(TCP);
+                    cliCfg.setServers(Collections.singletonList(addr));
+                    cliCfg.setMarshaller(new GridClientJdkMarshaller());
+                    cliCfg.setMaxConnectionIdleTime(24 * 60 * 60 * 1000L); // 1 day.
+                    cliCfg.setDaemon(true);
+
+                    try {
+                        GridClient cli = GridClientFactory.start(cliCfg);
+
+                        fut0.onDone(cli);
+
+                        return cli;
+                    }
+                    catch (GridClientException e) {
+                        fut0.onDone(e);
+
+                        throw new IOException("Failed to establish connection with Ignite node: " + addr, e);
+                    }
+                }
+            }
+            else
+                return fut.get();
+        }
+        catch (IgniteCheckedException e) {
+            throw new IOException("Failed to establish connection with Ignite node: " + addr, e);
+        }
+    }
+}
\ No newline at end of file
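
A minimal client-side sketch of how a MapReduce job is routed through this
provider; the endpoint address is an assumption (point it at the REST TCP port
of a running Ignite node) and the job body is elided:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRConfig;

public class IgniteMapReduceClientExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        conf.set(MRConfig.FRAMEWORK_NAME, "ignite");          // Selects this provider.
        conf.set(MRConfig.MASTER_ADDRESS, "127.0.0.1:11211"); // Must be a real node, not "local".

        Job job = Job.getInstance(conf, "example-job");

        // ... configure mapper, reducer, input and output as usual ...

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}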

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/mapreduce/package-info.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/mapreduce/package-info.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/mapreduce/package-info.java
new file mode 100644
index 0000000..7635b9e
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/mapreduce/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <!-- Package description. -->
+ * Ignite Hadoop Accelerator map-reduce classes.
+ */
+package org.apache.ignite.hadoop.mapreduce;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopAttributes.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopAttributes.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopAttributes.java
new file mode 100644
index 0000000..23eaa18
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopAttributes.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.configuration.HadoopConfiguration;
+import org.apache.ignite.internal.IgniteNodeAttributes;
+import org.apache.ignite.internal.util.tostring.GridToStringExclude;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.jetbrains.annotations.Nullable;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.Arrays;
+
+/**
+ * Hadoop attributes.
+ */
+public class HadoopAttributes implements Externalizable {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** Attribute name. */
+    public static final String NAME = IgniteNodeAttributes.ATTR_PREFIX + ".hadoop";
+
+    /** Map-reduce planner class name. */
+    private String plannerCls;
+
+    /** External executor flag. */
+    private boolean extExec;
+
+    /** Maximum parallel tasks. */
+    private int maxParallelTasks;
+
+    /** Maximum task queue size. */
+    private int maxTaskQueueSize;
+
+    /** Library names. */
+    @GridToStringExclude
+    private String[] libNames;
+
+    /** Number of cores. */
+    private int cores;
+
+    /**
+     * Get attributes for node (if any).
+     *
+     * @param node Node.
+     * @return Attributes or {@code null} if Hadoop Accelerator is not enabled for node.
+     */
+    @Nullable public static HadoopAttributes forNode(ClusterNode node) {
+        return node.attribute(NAME);
+    }
+
+    /**
+     * {@link Externalizable} support.
+     */
+    public HadoopAttributes() {
+        // No-op.
+    }
+
+    /**
+     * Constructor.
+     *
+     * @param cfg Configuration.
+     */
+    public HadoopAttributes(HadoopConfiguration cfg) {
+        assert cfg != null;
+        assert cfg.getMapReducePlanner() != null;
+
+        plannerCls = cfg.getMapReducePlanner().getClass().getName();
+
+        // TODO: IGNITE-404: Get from configuration when fixed.
+        extExec = false;
+
+        maxParallelTasks = cfg.getMaxParallelTasks();
+        maxTaskQueueSize = cfg.getMaxTaskQueueSize();
+        libNames = cfg.getNativeLibraryNames();
+
+        // Cores count is already passed in other attributes; we add it here for convenience.
+        cores = Runtime.getRuntime().availableProcessors();
+    }
+
+    /**
+     * @return Map reduce planner class name.
+     */
+    public String plannerClassName() {
+        return plannerCls;
+    }
+
+    /**
+     * @return External execution flag.
+     */
+    public boolean externalExecution() {
+        return extExec;
+    }
+
+    /**
+     * @return Maximum parallel tasks.
+     */
+    public int maxParallelTasks() {
+        return maxParallelTasks;
+    }
+
+    /**
+     * @return Maximum task queue size.
+     */
+    public int maxTaskQueueSize() {
+        return maxTaskQueueSize;
+    }
+
+    /**
+     * @return Native library names.
+     */
+    public String[] nativeLibraryNames() {
+        return libNames;
+    }
+
+    /**
+     * @return Number of cores on machine.
+     */
+    public int cores() {
+        return cores;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeExternal(ObjectOutput out) throws IOException {
+        out.writeObject(plannerCls);
+        out.writeBoolean(extExec);
+        out.writeInt(maxParallelTasks);
+        out.writeInt(maxTaskQueueSize);
+        out.writeObject(libNames);
+        out.writeInt(cores);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        plannerCls = (String)in.readObject();
+        extExec = in.readBoolean();
+        maxParallelTasks = in.readInt();
+        maxTaskQueueSize = in.readInt();
+        libNames = (String[])in.readObject();
+        cores = in.readInt();
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopAttributes.class, this, "libNames", Arrays.toString(libNames));
+    }
+}
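
A hedged sketch of the consuming side: a planner-like component reading the
attributes of a candidate node (the surrounding class and its use of the value
are hypothetical):

import org.apache.ignite.cluster.ClusterNode;

class AttributeAwareSketch {
    /**
     * @param node Candidate node.
     * @return Advertised task capacity, or 0 if the Hadoop Accelerator is disabled on the node.
     */
    int capacity(ClusterNode node) {
        HadoopAttributes attrs = HadoopAttributes.forNode(node);

        if (attrs == null)
            return 0; // Hadoop Accelerator is not enabled on this node.

        // Fall back to the advertised core count if the task limit is not set.
        return attrs.maxParallelTasks() > 0 ? attrs.maxParallelTasks() : attrs.cores();
    }
}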

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopComponent.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopComponent.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopComponent.java
new file mode 100644
index 0000000..aeda5c0
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopComponent.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteLogger;
+
+/**
+ * Abstract class for all Hadoop components.
+ */
+public abstract class HadoopComponent {
+    /** Hadoop context. */
+    protected HadoopContext ctx;
+
+    /** Logger. */
+    protected IgniteLogger log;
+
+    /**
+     * Starts component.
+     *
+     * @param ctx Hadoop context.
+     * @throws IgniteCheckedException If failed.
+     */
+    public void start(HadoopContext ctx) throws IgniteCheckedException {
+        this.ctx = ctx;
+
+        log = ctx.kernalContext().log(getClass());
+    }
+
+    /**
+     * Stops component.
+     *
+     * @param cancel {@code True} to cancel ongoing operations.
+     */
+    public void stop(boolean cancel) {
+        // No-op.
+    }
+
+    /**
+     * Callback invoked when all grid components are started.
+     *
+     * @throws IgniteCheckedException If failed.
+     */
+    public void onKernalStart() throws IgniteCheckedException {
+        // No-op.
+    }
+
+    /**
+     * Callback invoked before all grid components are stopped.
+     *
+     * @param cancel Cancel flag.
+     */
+    public void onKernalStop(boolean cancel) {
+        // No-op.
+    }
+}
\ No newline at end of file
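
A minimal sketch of a concrete component honoring the lifecycle above (the
component itself is hypothetical):

import org.apache.ignite.IgniteCheckedException;

class ExampleHadoopComponent extends HadoopComponent {
    /** {@inheritDoc} */
    @Override public void start(HadoopContext ctx) throws IgniteCheckedException {
        super.start(ctx); // Stores the context and initializes the logger.

        if (log.isDebugEnabled())
            log.debug("Example component started on node: " + ctx.localNodeId());
    }

    /** {@inheritDoc} */
    @Override public void stop(boolean cancel) {
        // Release resources here; cancel == true requests forced shutdown.
    }
}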

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopContext.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopContext.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopContext.java
new file mode 100644
index 0000000..42a3d72
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopContext.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.UUID;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.configuration.HadoopConfiguration;
+import org.apache.ignite.internal.GridKernalContext;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.hadoop.jobtracker.HadoopJobMetadata;
+import org.apache.ignite.internal.processors.hadoop.jobtracker.HadoopJobTracker;
+import org.apache.ignite.internal.processors.hadoop.shuffle.HadoopShuffle;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskExecutorAdapter;
+import org.apache.ignite.internal.util.typedef.internal.CU;
+
+/**
+ * Hadoop accelerator context.
+ */
+public class HadoopContext {
+    /** Kernal context. */
+    private GridKernalContext ctx;
+
+    /** Hadoop configuration. */
+    private HadoopConfiguration cfg;
+
+    /** Job tracker. */
+    private HadoopJobTracker jobTracker;
+
+    /** External task executor. */
+    private HadoopTaskExecutorAdapter taskExecutor;
+
+    /** Shuffle. */
+    private HadoopShuffle shuffle;
+
+    /** Managers list. */
+    private List<HadoopComponent> components = new ArrayList<>();
+
+    /**
+     * Constructor.
+     *
+     * @param ctx Kernal context.
+     * @param cfg Hadoop configuration.
+     * @param jobTracker Job tracker.
+     * @param taskExecutor Task executor.
+     * @param shuffle Shuffle.
+     */
+    public HadoopContext(
+        GridKernalContext ctx,
+        HadoopConfiguration cfg,
+        HadoopJobTracker jobTracker,
+        HadoopTaskExecutorAdapter taskExecutor,
+        HadoopShuffle shuffle
+    ) {
+        this.ctx = ctx;
+        this.cfg = cfg;
+
+        this.jobTracker = add(jobTracker);
+        this.taskExecutor = add(taskExecutor);
+        this.shuffle = add(shuffle);
+    }
+
+    /**
+     * Gets list of managers.
+     *
+     * @return List of managers.
+     */
+    public List<HadoopComponent> components() {
+        return components;
+    }
+
+    /**
+     * Gets kernal context.
+     *
+     * @return Grid kernal context instance.
+     */
+    public GridKernalContext kernalContext() {
+        return ctx;
+    }
+
+    /**
+     * Gets Hadoop configuration.
+     *
+     * @return Hadoop configuration.
+     */
+    public HadoopConfiguration configuration() {
+        return cfg;
+    }
+
+    /**
+     * Gets local node ID. Shortcut for {@code kernalContext().localNodeId()}.
+     *
+     * @return Local node ID.
+     */
+    public UUID localNodeId() {
+        return ctx.localNodeId();
+    }
+
+    /**
+     * Gets local node order.
+     *
+     * @return Local node order.
+     */
+    public long localNodeOrder() {
+        assert ctx.discovery() != null;
+
+        return ctx.discovery().localNode().order();
+    }
+
+    /**
+     * @return Hadoop-enabled nodes.
+     */
+    public Collection<ClusterNode> nodes() {
+        return ctx.discovery().cacheNodes(CU.SYS_CACHE_HADOOP_MR, ctx.discovery().topologyVersionEx());
+    }
+
+    /**
+     * @return {@code True} if the local node is the job update leader, i.e. the Hadoop-enabled node with the smallest order.
+     */
+    public boolean jobUpdateLeader() {
+        long minOrder = Long.MAX_VALUE;
+        ClusterNode minOrderNode = null;
+
+        for (ClusterNode node : nodes()) {
+            if (node.order() < minOrder) {
+                minOrder = node.order();
+                minOrderNode = node;
+            }
+        }
+
+        assert minOrderNode != null;
+
+        return localNodeId().equals(minOrderNode.id());
+    }
+
+    /**
+     * @param meta Job metadata.
+     * @return {@code true} If local node is participating in job execution.
+     */
+    public boolean isParticipating(HadoopJobMetadata meta) {
+        UUID locNodeId = localNodeId();
+
+        if (locNodeId.equals(meta.submitNodeId()))
+            return true;
+
+        HadoopMapReducePlan plan = meta.mapReducePlan();
+
+        return plan.mapperNodeIds().contains(locNodeId) || plan.reducerNodeIds().contains(locNodeId) || jobUpdateLeader();
+    }
+
+    /**
+     * @return Job tracker instance.
+     */
+    public HadoopJobTracker jobTracker() {
+        return jobTracker;
+    }
+
+    /**
+     * @return Task executor.
+     */
+    public HadoopTaskExecutorAdapter taskExecutor() {
+        return taskExecutor;
+    }
+
+    /**
+     * @return Shuffle.
+     */
+    public HadoopShuffle shuffle() {
+        return shuffle;
+    }
+
+    /**
+     * @return Map-reduce planner.
+     */
+    public HadoopMapReducePlanner planner() {
+        return cfg.getMapReducePlanner();
+    }
+
+    /**
+     * Adds component.
+     *
+     * @param c Component to add.
+     * @return Added component.
+     */
+    private <C extends HadoopComponent> C add(C c) {
+        components.add(c);
+
+        return c;
+    }
+}
\ No newline at end of file
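
A usage sketch of the leader check above: housekeeping that must run on exactly
one node is guarded by jobUpdateLeader(), which selects the Hadoop-enabled node
with the smallest order; the wrapper class and helper are hypothetical:

class LeaderGuardSketch {
    void onMetadataUpdate(HadoopContext hadoopCtx) {
        if (hadoopCtx.jobUpdateLeader())
            pruneStaleJobs(); // Executed by the oldest Hadoop-enabled node only.
    }

    private void pruneStaleJobs() {
        // Hypothetical housekeeping; intentionally a no-op in this sketch.
    }
}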

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultJobInfo.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultJobInfo.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultJobInfo.java
new file mode 100644
index 0000000..1382c1f
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultJobInfo.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.lang.reflect.Constructor;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Hadoop job info based on default Hadoop configuration.
+ */
+public class HadoopDefaultJobInfo implements HadoopJobInfo, Externalizable {
+    /** */
+    private static final long serialVersionUID = 5489900236464999951L;
+
+    /** {@code True} if the job has a combiner. */
+    private boolean hasCombiner;
+
+    /** Number of reducers configured for job. */
+    private int numReduces;
+
+    /** Configuration. */
+    private Map<String,String> props = new HashMap<>();
+
+    /** Job name. */
+    private String jobName;
+
+    /** User name. */
+    private String user;
+
+    /**
+     * Default constructor required by {@link Externalizable}.
+     */
+    public HadoopDefaultJobInfo() {
+        // No-op.
+    }
+
+    /**
+     * Constructor.
+     *
+     * @param jobName Job name.
+     * @param user User name.
+     * @param hasCombiner {@code True} if the job has a combiner.
+     * @param numReduces Number of reducers configured for job.
+     * @param props All other properties of the job.
+     */
+    public HadoopDefaultJobInfo(String jobName, String user, boolean hasCombiner, int numReduces,
+        Map<String, String> props) {
+        this.jobName = jobName;
+        this.user = user;
+        this.hasCombiner = hasCombiner;
+        this.numReduces = numReduces;
+        this.props = props;
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public String property(String name) {
+        return props.get(name);
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopJob createJob(Class<? extends HadoopJob> jobCls, HadoopJobId jobId, IgniteLogger log,
+        @Nullable String[] libNames) throws IgniteCheckedException {
+        assert jobCls != null;
+
+        try {
+            Constructor<? extends HadoopJob> constructor = jobCls.getConstructor(HadoopJobId.class,
+                HadoopDefaultJobInfo.class, IgniteLogger.class, String[].class);
+
+            return constructor.newInstance(jobId, this, log, libNames);
+        }
+        catch (Throwable t) {
+            if (t instanceof Error)
+                throw (Error)t;
+            
+            throw new IgniteCheckedException(t);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean hasCombiner() {
+        return hasCombiner;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean hasReducer() {
+        return reducers() > 0;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int reducers() {
+        return numReduces;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String jobName() {
+        return jobName;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String user() {
+        return user;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeExternal(ObjectOutput out) throws IOException {
+        U.writeString(out, jobName);
+        U.writeString(out, user);
+
+        out.writeBoolean(hasCombiner);
+        out.writeInt(numReduces);
+
+        U.writeStringMap(out, props);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        jobName = U.readString(in);
+        user = U.readString(in);
+
+        hasCombiner = in.readBoolean();
+        numReduces = in.readInt();
+
+        props = U.readStringMap(in);
+    }
+
+    /**
+     * @return Properties of the job.
+     */
+    public Map<String, String> properties() {
+        return props;
+    }
+}
\ No newline at end of file
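
A sketch of building a job info on the submitting side; the job name, user and
property value are illustrative:

import java.util.HashMap;
import java.util.Map;

class JobInfoSketch {
    static HadoopDefaultJobInfo wordCountInfo() {
        Map<String, String> props = new HashMap<>();

        props.put("mapreduce.job.reduces", "4"); // Standard Hadoop property, shown for illustration.

        HadoopDefaultJobInfo info = new HadoopDefaultJobInfo("word-count", "anonymous",
            false /* no combiner */, 4 /* reducers */, props);

        assert info.hasReducer() && info.reducers() == 4;

        return info;
    }
}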

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopImpl.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopImpl.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopImpl.java
new file mode 100644
index 0000000..ed2657e
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopImpl.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.configuration.HadoopConfiguration;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
+import org.apache.ignite.internal.util.GridSpinBusyLock;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Hadoop facade implementation.
+ */
+public class HadoopImpl implements Hadoop {
+    /** Hadoop processor. */
+    private final HadoopProcessor proc;
+
+    /** Busy lock. */
+    private final GridSpinBusyLock busyLock = new GridSpinBusyLock();
+
+    /**
+     * Constructor.
+     *
+     * @param proc Hadoop processor.
+     */
+    HadoopImpl(HadoopProcessor proc) {
+        this.proc = proc;
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopConfiguration configuration() {
+        return proc.config();
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopJobId nextJobId() {
+        if (busyLock.enterBusy()) {
+            try {
+                return proc.nextJobId();
+            }
+            finally {
+                busyLock.leaveBusy();
+            }
+        }
+        else
+            throw new IllegalStateException("Failed to get next job ID (grid is stopping).");
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteInternalFuture<?> submit(HadoopJobId jobId, HadoopJobInfo jobInfo) {
+        if (busyLock.enterBusy()) {
+            try {
+                return proc.submit(jobId, jobInfo);
+            }
+            finally {
+                busyLock.leaveBusy();
+            }
+        }
+        else
+            throw new IllegalStateException("Failed to submit job (grid is stopping).");
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public HadoopJobStatus status(HadoopJobId jobId) throws IgniteCheckedException {
+        if (busyLock.enterBusy()) {
+            try {
+                return proc.status(jobId);
+            }
+            finally {
+                busyLock.leaveBusy();
+            }
+        }
+        else
+            throw new IllegalStateException("Failed to get job status (grid is stopping).");
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public HadoopCounters counters(HadoopJobId jobId) throws IgniteCheckedException {
+        if (busyLock.enterBusy()) {
+            try {
+                return proc.counters(jobId);
+            }
+            finally {
+                busyLock.leaveBusy();
+            }
+        }
+        else
+            throw new IllegalStateException("Failed to get job counters (grid is stopping).");
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public IgniteInternalFuture<?> finishFuture(HadoopJobId jobId) throws IgniteCheckedException {
+        if (busyLock.enterBusy()) {
+            try {
+                return proc.finishFuture(jobId);
+            }
+            finally {
+                busyLock.leaveBusy();
+            }
+        }
+        else
+            throw new IllegalStateException("Failed to get job finish future (grid is stopping).");
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean kill(HadoopJobId jobId) throws IgniteCheckedException {
+        if (busyLock.enterBusy()) {
+            try {
+                return proc.kill(jobId);
+            }
+            finally {
+                busyLock.leaveBusy();
+            }
+        }
+        else
+            throw new IllegalStateException("Failed to kill job (grid is stopping).");
+    }
+}
\ No newline at end of file
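
The busy-lock guard idiom used by every facade method above, shown standalone
as a sketch (the wrapper class is hypothetical):

import org.apache.ignite.internal.util.GridSpinBusyLock;

class BusyGuardSketch {
    private final GridSpinBusyLock busyLock = new GridSpinBusyLock();

    void guarded(Runnable op) {
        if (busyLock.enterBusy()) {
            try {
                op.run(); // Safe: grid stop cannot complete while we hold the busy lock.
            }
            finally {
                busyLock.leaveBusy();
            }
        }
        else
            throw new IllegalStateException("Failed to execute operation (grid is stopping).");
    }
}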

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounterGroup.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounterGroup.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounterGroup.java
new file mode 100644
index 0000000..4e03e17
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounterGroup.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Iterator;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.CounterGroup;
+import org.apache.hadoop.mapreduce.counters.CounterGroupBase;
+
+/**
+ * Hadoop counter group adapter.
+ */
+class HadoopMapReduceCounterGroup implements CounterGroup {
+    /** Counters. */
+    private final HadoopMapReduceCounters cntrs;
+
+    /** Group name. */
+    private final String name;
+
+    /**
+     * Creates new instance.
+     *
+     * @param cntrs Client counters instance.
+     * @param name Group name.
+     */
+    HadoopMapReduceCounterGroup(HadoopMapReduceCounters cntrs, String name) {
+        this.cntrs = cntrs;
+        this.name = name;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String getName() {
+        return name;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String getDisplayName() {
+        return name;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setDisplayName(String displayName) {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public void addCounter(Counter counter) {
+        addCounter(counter.getName(), counter.getDisplayName(), counter.getValue());
+    }
+
+    /** {@inheritDoc} */
+    @Override public Counter addCounter(String name, String displayName, long value) {
+        final Counter counter = cntrs.findCounter(this.name, name);
+
+        counter.setValue(value);
+
+        return counter;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Counter findCounter(String counterName, String displayName) {
+        return cntrs.findCounter(name, counterName);
+    }
+
+    /** {@inheritDoc} */
+    @Override public Counter findCounter(String counterName, boolean create) {
+        return cntrs.findCounter(name, counterName, create);
+    }
+
+    /** {@inheritDoc} */
+    @Override public Counter findCounter(String counterName) {
+        return cntrs.findCounter(name, counterName);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int size() {
+        return cntrs.groupSize(name);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void incrAllCounters(CounterGroupBase<Counter> rightGroup) {
+        for (final Counter counter : rightGroup)
+            cntrs.findCounter(name, counter.getName()).increment(counter.getValue());
+    }
+
+    /** {@inheritDoc} */
+    @Override public CounterGroupBase<Counter> getUnderlyingGroup() {
+        return this;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Iterator<Counter> iterator() {
+        return cntrs.iterateGroup(name);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void write(DataOutput out) throws IOException {
+        throw new UnsupportedOperationException("not implemented");
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readFields(DataInput in) throws IOException {
+        throw new UnsupportedOperationException("not implemented");
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounters.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounters.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounters.java
new file mode 100644
index 0000000..57a853f
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounters.java
@@ -0,0 +1,228 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.CounterGroup;
+import org.apache.hadoop.mapreduce.Counters;
+import org.apache.hadoop.mapreduce.FileSystemCounter;
+import org.apache.hadoop.mapreduce.counters.AbstractCounters;
+import org.apache.hadoop.mapreduce.counters.Limits;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounter;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopLongCounter;
+import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2Counter;
+import org.apache.ignite.internal.util.typedef.T2;
+
+/**
+ * Hadoop counters adapter.
+ */
+public class HadoopMapReduceCounters extends Counters {
+    /** */
+    private final Map<T2<String,String>,HadoopLongCounter> cntrs = new HashMap<>();
+
+    /**
+     * Creates new instance based on given counters.
+     *
+     * @param cntrs Counters to adapt.
+     */
+    public HadoopMapReduceCounters(org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters cntrs) {
+        for (HadoopCounter cntr : cntrs.all())
+            if (cntr instanceof HadoopLongCounter)
+                this.cntrs.put(new T2<>(cntr.group(), cntr.name()), (HadoopLongCounter) cntr);
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized CounterGroup addGroup(CounterGroup grp) {
+        return addGroup(grp.getName(), grp.getDisplayName());
+    }
+
+    /** {@inheritDoc} */
+    @Override public CounterGroup addGroup(String name, String displayName) {
+        return new HadoopMapReduceCounterGroup(this, name);
+    }
+
+    /** {@inheritDoc} */
+    @Override public Counter findCounter(String grpName, String cntrName) {
+        return findCounter(grpName, cntrName, true);
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized Counter findCounter(Enum<?> key) {
+        return findCounter(key.getDeclaringClass().getName(), key.name(), true);
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized Counter findCounter(String scheme, FileSystemCounter key) {
+        return findCounter(String.format("FileSystem Counter (%s)", scheme), key.name());
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized Iterable<String> getGroupNames() {
+        Collection<String> res = new HashSet<>();
+
+        for (HadoopCounter counter : cntrs.values())
+            res.add(counter.group());
+
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Iterator<CounterGroup> iterator() {
+        final Iterator<String> iter = getGroupNames().iterator();
+
+        return new Iterator<CounterGroup>() {
+            @Override public boolean hasNext() {
+                return iter.hasNext();
+            }
+
+            @Override public CounterGroup next() {
+                if (!hasNext())
+                    throw new NoSuchElementException();
+
+                return new HadoopMapReduceCounterGroup(HadoopMapReduceCounters.this, iter.next());
+            }
+
+            @Override public void remove() {
+                throw new UnsupportedOperationException("not implemented");
+            }
+        };
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized CounterGroup getGroup(String grpName) {
+        return new HadoopMapReduceCounterGroup(this, grpName);
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized int countCounters() {
+        return cntrs.size();
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized void write(DataOutput out) throws IOException {
+        throw new UnsupportedOperationException("not implemented");
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized void readFields(DataInput in) throws IOException {
+        throw new UnsupportedOperationException("not implemented");
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized void incrAllCounters(AbstractCounters<Counter, CounterGroup> other) {
+        for (CounterGroup group : other) {
+            for (Counter counter : group) {
+                findCounter(group.getName(), counter.getName()).increment(counter.getValue());
+            }
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object genericRight) {
+        if (!(genericRight instanceof HadoopMapReduceCounters))
+            return false;
+
+        return cntrs.equals(((HadoopMapReduceCounters) genericRight).cntrs);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        return cntrs.hashCode();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setWriteAllCounters(boolean snd) {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean getWriteAllCounters() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Limits limits() {
+        return null;
+    }
+
+    /**
+     * Returns size of a group.
+     *
+     * @param grpName Name of the group.
+     * @return Amount of counters in the given group.
+     */
+    public int groupSize(String grpName) {
+        int res = 0;
+
+        for (HadoopCounter counter : cntrs.values()) {
+            if (grpName.equals(counter.group()))
+                res++;
+        }
+
+        return res;
+    }
+
+    /**
+     * Returns counters iterator for specified group.
+     *
+     * @param grpName Name of the group to iterate.
+     * @return Counters iterator.
+     */
+    public Iterator<Counter> iterateGroup(String grpName) {
+        Collection<Counter> grpCounters = new ArrayList<>();
+
+        for (HadoopLongCounter counter : cntrs.values()) {
+            if (grpName.equals(counter.group()))
+                grpCounters.add(new HadoopV2Counter(counter));
+        }
+
+        return grpCounters.iterator();
+    }
+
+    /**
+     * Find a counter in the group.
+     *
+     * @param grpName The name of the counter group.
+     * @param cntrName The name of the counter.
+     * @param create Whether to create the counter if it is not found.
+     * @return The counter that was found or added, or {@code null} if it does not exist and {@code create} is {@code false}.
+     */
+    public Counter findCounter(String grpName, String cntrName, boolean create) {
+        T2<String, String> key = new T2<>(grpName, cntrName);
+
+        HadoopLongCounter internalCntr = cntrs.get(key);
+
+        if (internalCntr == null && create) {
+            internalCntr = new HadoopLongCounter(grpName, cntrName);
+
+            // Store the same instance that is returned, so that increments are visible through the map.
+            cntrs.put(key, internalCntr);
+        }
+
+        return internalCntr == null ? null : new HadoopV2Counter(internalCntr);
+    }
+}
\ No newline at end of file
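
A hedged usage sketch for the adapter above; the internal counters instance is
assumed to be obtained from a completed job:

import org.apache.hadoop.mapreduce.Counter;
import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;

class CountersUsageSketch {
    static void dump(HadoopCounters internal) {
        HadoopMapReduceCounters counters = new HadoopMapReduceCounters(internal);

        // Find-or-create semantics: the counter is added on first access.
        Counter recs = counters.findCounter("example-group", "records");

        recs.increment(1);

        for (String grp : counters.getGroupNames())
            System.out.println(grp + ": " + counters.groupSize(grp) + " counter(s)");
    }
}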


[04/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemClientSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemClientSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemClientSelfTest.java
deleted file mode 100644
index 8ddb359..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemClientSelfTest.java
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import java.io.IOException;
-import java.lang.reflect.Field;
-import java.util.Collection;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.configuration.FileSystemConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.internal.IgniteKernal;
-import org.apache.ignite.internal.igfs.common.IgfsLogger;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfs;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsOutProc;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsOutputStream;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsStreamDelegate;
-import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
-import org.apache.ignite.internal.processors.igfs.IgfsContext;
-import org.apache.ignite.internal.processors.igfs.IgfsProcessorAdapter;
-import org.apache.ignite.internal.processors.igfs.IgfsServer;
-import org.apache.ignite.internal.processors.igfs.IgfsServerHandler;
-import org.apache.ignite.internal.processors.igfs.IgfsServerManager;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.G;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
-import org.apache.ignite.testframework.GridTestUtils;
-
-import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
-import static org.apache.ignite.cache.CacheMode.PARTITIONED;
-import static org.apache.ignite.cache.CacheMode.REPLICATED;
-import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
-import static org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT;
-
-/**
- * Test interaction between a IGFS client and a IGFS server.
- */
-public class IgniteHadoopFileSystemClientSelfTest extends IgfsCommonAbstractTest {
-    /** Logger. */
-    private static final Log LOG = LogFactory.getLog(IgniteHadoopFileSystemClientSelfTest.class);
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTestsStarted() throws Exception {
-        startGrids(1);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTestsStopped() throws Exception {
-        G.stopAll(true);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
-        IgniteConfiguration cfg = super.getConfiguration(gridName);
-
-        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
-        discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
-
-        cfg.setDiscoverySpi(discoSpi);
-
-        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
-
-        igfsCfg.setDataCacheName("partitioned");
-        igfsCfg.setMetaCacheName("replicated");
-        igfsCfg.setName("igfs");
-        igfsCfg.setBlockSize(512 * 1024);
-
-        IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
-
-        endpointCfg.setType(IgfsIpcEndpointType.TCP);
-        endpointCfg.setPort(DFLT_IPC_PORT);
-
-        igfsCfg.setIpcEndpointConfiguration(endpointCfg);
-
-        cfg.setCacheConfiguration(cacheConfiguration());
-        cfg.setFileSystemConfiguration(igfsCfg);
-
-        return cfg;
-    }
-
-    /**
-     * Gets cache configuration.
-     *
-     * @return Cache configuration.
-     */
-    protected CacheConfiguration[] cacheConfiguration() {
-        CacheConfiguration cacheCfg = defaultCacheConfiguration();
-
-        cacheCfg.setName("partitioned");
-        cacheCfg.setCacheMode(PARTITIONED);
-        cacheCfg.setNearConfiguration(null);
-        cacheCfg.setWriteSynchronizationMode(FULL_SYNC);
-        cacheCfg.setEvictionPolicy(null);
-        cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(128));
-        cacheCfg.setBackups(0);
-        cacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
-
-        metaCacheCfg.setName("replicated");
-        metaCacheCfg.setCacheMode(REPLICATED);
-        metaCacheCfg.setWriteSynchronizationMode(FULL_SYNC);
-        metaCacheCfg.setEvictionPolicy(null);
-        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        return new CacheConfiguration[] {metaCacheCfg, cacheCfg};
-    }
-
-    /**
-     * Test output stream deferred exception (GG-4440).
-     *
-     * @throws Exception If failed.
-     */
-    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
-    public void testOutputStreamDeferredException() throws Exception {
-        final byte[] data = "test".getBytes();
-
-        try {
-            switchHandlerErrorFlag(true);
-
-            HadoopIgfs client = new HadoopIgfsOutProc("127.0.0.1", 10500, getTestGridName(0), "igfs", LOG, null);
-
-            client.handshake(null);
-
-            IgfsPath path = new IgfsPath("/test1.file");
-
-            HadoopIgfsStreamDelegate delegate = client.create(path, true, false, 1, 1024, null);
-
-            final HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(delegate, LOG,
-                IgfsLogger.disabledLogger(), 0);
-
-            // This call should return fine as exception is thrown for the first time.
-            igfsOut.write(data);
-
-            U.sleep(500);
-
-            // This call should throw an IO exception.
-            GridTestUtils.assertThrows(null, new Callable<Object>() {
-                @Override public Object call() throws Exception {
-                    igfsOut.write(data);
-
-                    return null;
-                }
-            }, IOException.class, "Failed to write data to server (test).");
-        }
-        finally {
-            switchHandlerErrorFlag(false);
-        }
-    }
-
-    /**
-     * Set IGFS REST handler error flag to the given state.
-     *
-     * @param flag Flag state.
-     * @throws Exception If failed.
-     */
-    @SuppressWarnings("ConstantConditions")
-    private void switchHandlerErrorFlag(boolean flag) throws Exception {
-        IgfsProcessorAdapter igfsProc = ((IgniteKernal)grid(0)).context().igfs();
-
-        Map<String, IgfsContext> igfsMap = getField(igfsProc, "igfsCache");
-
-        IgfsServerManager srvMgr = F.first(igfsMap.values()).server();
-
-        Collection<IgfsServer> srvrs = getField(srvMgr, "srvrs");
-
-        IgfsServerHandler igfsHnd = getField(F.first(srvrs), "hnd");
-
-        Field field = igfsHnd.getClass().getDeclaredField("errWrite");
-
-        field.setAccessible(true);
-
-        field.set(null, flag);
-    }
-
-    /**
-     * Get value of the field with the given name of the given object.
-     *
-     * @param obj Object.
-     * @param fieldName Field name.
-     * @return Value of the field.
-     * @throws Exception If failed.
-     */
-    @SuppressWarnings("unchecked")
-    private <T> T getField(Object obj, String fieldName) throws Exception {
-        Field field = obj.getClass().getDeclaredField(fieldName);
-
-        field.setAccessible(true);
-
-        return (T)field.get(obj);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemHandshakeSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemHandshakeSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemHandshakeSelfTest.java
deleted file mode 100644
index fdb0d77..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemHandshakeSelfTest.java
+++ /dev/null
@@ -1,389 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.concurrent.Callable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteFileSystem;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.configuration.FileSystemConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem;
-import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
-import org.apache.ignite.internal.util.typedef.G;
-import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
-import org.apache.ignite.testframework.GridTestUtils;
-
-import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
-import static org.apache.ignite.cache.CacheMode.PARTITIONED;
-import static org.apache.ignite.cache.CacheMode.REPLICATED;
-import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
-import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
-import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_EMBED;
-import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM;
-import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_LOCAL_TCP;
-import static org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT;
-
-/**
- * Tests for IGFS file system handshake.
- */
-public class IgniteHadoopFileSystemHandshakeSelfTest extends IgfsCommonAbstractTest {
-    /** IP finder. */
-    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
-
-    /** Grid name. */
-    private static final String GRID_NAME = "grid";
-
-    /** IGFS name. */
-    private static final String IGFS_NAME = "igfs";
-
-    /** IGFS path. */
-    private static final IgfsPath PATH = new IgfsPath("/path");
-
-    /** A host:port pair with no live server behind it, used in URIs for embedded mode. */
-    private static final String HOST_PORT_UNUSED = "somehost:65333";
-
-    /** Flag defining whether to use the TCP or the embedded connection mode. */
-    private boolean tcp = false;
-
-    /** {@inheritDoc} */
-    @Override protected void afterTest() throws Exception {
-        stopAllGrids(true);
-    }
-
-    /**
-     * Tests for Grid and IGFS having normal names.
-     *
-     * @throws Exception If failed.
-     */
-    public void testHandshake() throws Exception {
-        startUp(false, false);
-
-        tcp = true;
-
-        checkValid(IGFS_NAME + ":" + GRID_NAME + "@");
-        checkValid(IGFS_NAME + ":" + GRID_NAME + "@127.0.0.1");
-        checkValid(IGFS_NAME + ":" + GRID_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
-
-        checkValid(IGFS_NAME + "@");
-        checkValid(IGFS_NAME + "@127.0.0.1");
-        checkValid(IGFS_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
-
-        checkValid(":" + GRID_NAME + "@");
-        checkValid(":" + GRID_NAME + "@127.0.0.1");
-        checkValid(":" + GRID_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
-
-        checkValid("");
-        checkValid("127.0.0.1");
-        checkValid("127.0.0.1:" + DFLT_IPC_PORT);
-
-        tcp = false; // Embedded mode:
-
-        checkValid(IGFS_NAME + ":" + GRID_NAME + "@");
-        checkValid(IGFS_NAME + ":" + GRID_NAME + "@" + HOST_PORT_UNUSED);
-
-        checkValid(IGFS_NAME + "@"); // Embedded mode fails, but remote tcp succeeds.
-        checkInvalid(IGFS_NAME + "@" + HOST_PORT_UNUSED);
-
-        checkValid(":" + GRID_NAME + "@"); // Embedded mode fails, but remote tcp succeeds.
-        checkInvalid(":" + GRID_NAME + "@" + HOST_PORT_UNUSED);
-
-        checkValid("@"); // Embedded mode fails, but remote tcp succeeds.
-        checkInvalid("@" + HOST_PORT_UNUSED);
-    }
-
-    /**
-     * Tests for Grid having {@code null} name and IGFS having normal name.
-     *
-     * @throws Exception If failed.
-     */
-    public void testHandshakeDefaultGrid() throws Exception {
-        startUp(true, false);
-
-        tcp = true;
-
-        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@");
-        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@127.0.0.1");
-        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
-
-        checkValid(IGFS_NAME + "@");
-        checkValid(IGFS_NAME + "@127.0.0.1");
-        checkValid(IGFS_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
-
-        checkInvalid(":" + GRID_NAME + "@");
-        checkInvalid(":" + GRID_NAME + "@127.0.0.1");
-        checkInvalid(":" + GRID_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
-
-        checkValid("");
-        checkValid("127.0.0.1");
-        checkValid("127.0.0.1:" + DFLT_IPC_PORT);
-
-        tcp = false; // Embedded mode:
-
-        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@");
-        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@" + HOST_PORT_UNUSED);
-
-        checkValid(IGFS_NAME + "@");
-        checkValid(IGFS_NAME + "@" + HOST_PORT_UNUSED);
-
-        checkInvalid(":" + GRID_NAME + "@");
-        checkInvalid(":" + GRID_NAME + "@" + HOST_PORT_UNUSED);
-
-        checkValid("@"); // Embedded mode fails, but remote tcp succeeds.
-        checkInvalid("@" + HOST_PORT_UNUSED);
-    }
-
-    /**
-     * Tests for Grid having normal name and IGFS having {@code null} name.
-     *
-     * @throws Exception If failed.
-     */
-    public void testHandshakeDefaultIgfs() throws Exception {
-        startUp(false /* normal grid name */, true /* default IGFS name */);
-
-        tcp = true;
-
-        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@");
-        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@127.0.0.1");
-        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
-
-        checkInvalid(IGFS_NAME + "@");
-        checkInvalid(IGFS_NAME + "@127.0.0.1");
-        checkInvalid(IGFS_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
-
-        checkValid(":" + GRID_NAME + "@");
-        checkValid(":" + GRID_NAME + "@127.0.0.1");
-        checkValid(":" + GRID_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
-
-        checkValid("");
-        checkValid("127.0.0.1");
-        checkValid("127.0.0.1:" + DFLT_IPC_PORT);
-
-        tcp = false; // Embedded mode:
-
-        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@");
-        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@" + HOST_PORT_UNUSED);
-
-        checkInvalid(IGFS_NAME + "@");
-        checkInvalid(IGFS_NAME + "@" + HOST_PORT_UNUSED);
-
-        checkValid(":" + GRID_NAME + "@");
-        checkValid(":" + GRID_NAME + "@" + HOST_PORT_UNUSED);
-
-        checkValid("@"); // NB: in embedded mode this fails, but remote TCP still succeeds.
-        checkInvalid("@" + HOST_PORT_UNUSED);
-    }
-
-    /**
-     * Tests for Grid having {@code null} name and IGFS having {@code null} name.
-     *
-     * @throws Exception If failed.
-     */
-    public void testHandshakeDefaultGridDefaultIgfs() throws Exception {
-        startUp(true, true);
-
-        tcp = true;
-
-        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@");
-        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@127.0.0.1");
-        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
-
-        checkInvalid(IGFS_NAME + "@");
-        checkInvalid(IGFS_NAME + "@127.0.0.1");
-        checkInvalid(IGFS_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
-
-        checkInvalid(":" + GRID_NAME + "@");
-        checkInvalid(":" + GRID_NAME + "@127.0.0.1");
-        checkInvalid(":" + GRID_NAME + "@127.0.0.1:" + DFLT_IPC_PORT);
-
-        checkValid("");
-        checkValid("127.0.0.1");
-        checkValid("127.0.0.1:" + DFLT_IPC_PORT);
-
-        tcp = false; // Embedded mode:
-
-        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@");
-        checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@" + HOST_PORT_UNUSED);
-
-        checkInvalid(IGFS_NAME + "@");
-        checkInvalid(IGFS_NAME + "@" + HOST_PORT_UNUSED);
-
-        checkInvalid(":" + GRID_NAME + "@");
-        checkInvalid(":" + GRID_NAME + "@" + HOST_PORT_UNUSED);
-
-        checkValid("@");
-        checkValid("@" + HOST_PORT_UNUSED);
-    }
-
-    /**
-     * Perform startup.
-     *
-     * @param dfltGridName Whether to use the default ({@code null}) Grid name.
-     * @param dfltIgfsName Whether to use the default ({@code null}) IGFS name.
-     * @throws Exception If failed.
-     */
-    private void startUp(boolean dfltGridName, boolean dfltIgfsName) throws Exception {
-        Ignite ignite = G.start(gridConfiguration(dfltGridName, dfltIgfsName));
-
-        IgniteFileSystem igfs = ignite.fileSystem(dfltIgfsName ? null : IGFS_NAME);
-
-        igfs.mkdirs(PATH);
-    }
-
-    /**
-     * Create Grid configuration.
-     *
-     * @param dfltGridName Whether to use the default ({@code null}) Grid name.
-     * @param dfltIgfsName Whether to use the default ({@code null}) IGFS name.
-     * @return Grid configuration.
-     * @throws Exception If failed.
-     */
-    private IgniteConfiguration gridConfiguration(boolean dfltGridName, boolean dfltIgfsName) throws Exception {
-        IgniteConfiguration cfg = super.getConfiguration(dfltGridName ? null : GRID_NAME);
-
-        cfg.setLocalHost("127.0.0.1");
-        cfg.setConnectorConfiguration(null);
-
-        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
-
-        discoSpi.setIpFinder(IP_FINDER);
-
-        cfg.setDiscoverySpi(discoSpi);
-
-        TcpCommunicationSpi commSpi = new TcpCommunicationSpi();
-
-        commSpi.setSharedMemoryPort(-1);
-
-        cfg.setCommunicationSpi(commSpi);
-
-        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
-
-        metaCacheCfg.setName("replicated");
-        metaCacheCfg.setCacheMode(REPLICATED);
-        metaCacheCfg.setWriteSynchronizationMode(FULL_SYNC);
-        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        CacheConfiguration dataCacheCfg = defaultCacheConfiguration();
-
-        dataCacheCfg.setName("partitioned");
-        dataCacheCfg.setCacheMode(PARTITIONED);
-        dataCacheCfg.setNearConfiguration(null);
-        dataCacheCfg.setWriteSynchronizationMode(FULL_SYNC);
-        dataCacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(128));
-        dataCacheCfg.setBackups(0);
-        dataCacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        cfg.setCacheConfiguration(metaCacheCfg, dataCacheCfg);
-
-        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
-
-        igfsCfg.setDataCacheName("partitioned");
-        igfsCfg.setMetaCacheName("replicated");
-        igfsCfg.setName(dfltIgfsName ? null : IGFS_NAME);
-        igfsCfg.setPrefetchBlocks(1);
-        igfsCfg.setDefaultMode(PRIMARY);
-
-        IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
-
-        endpointCfg.setType(IgfsIpcEndpointType.TCP);
-        endpointCfg.setPort(DFLT_IPC_PORT);
-
-        igfsCfg.setIpcEndpointConfiguration(endpointCfg);
-
-        igfsCfg.setManagementPort(-1);
-        igfsCfg.setBlockSize(512 * 1024);
-
-        cfg.setFileSystemConfiguration(igfsCfg);
-
-        return cfg;
-    }
-
-    /**
-     * Check valid file system endpoint.
-     *
-     * @param authority Authority.
-     * @throws Exception If failed.
-     */
-    private void checkValid(String authority) throws Exception {
-        FileSystem fs = fileSystem(authority, tcp);
-
-        assert fs.exists(new Path(PATH.toString()));
-    }
-
-    /**
-     * Check invalid file system endpoint.
-     *
-     * @param authority Authority.
-     * @throws Exception If failed.
-     */
-    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
-    private void checkInvalid(final String authority) throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                fileSystem(authority, tcp);
-
-                return null;
-            }
-        }, IOException.class, null);
-    }
-
-    /**
-     * Gets the file system for the given authority and TCP flag.
-     *
-     * @param authority Authority.
-     * @param tcp Whether to use the TCP endpoint instead of the embedded connection.
-     * @return File system.
-     * @throws Exception If failed.
-     */
-    private static FileSystem fileSystem(String authority, boolean tcp) throws Exception {
-        return FileSystem.get(new URI("igfs://" + authority + "/"), configuration(authority, tcp));
-    }
-
-    /**
-     * Create configuration for test.
-     *
-     * @param authority Authority.
-     * @param tcp Whether to use the TCP endpoint instead of the embedded connection.
-     * @return Configuration.
-     */
-    private static Configuration configuration(String authority, boolean tcp) {
-        Configuration cfg = new Configuration();
-
-        cfg.set("fs.defaultFS", "igfs://" + authority + "/");
-        cfg.set("fs.igfs.impl", org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.class.getName());
-        cfg.set("fs.AbstractFileSystem.igfs.impl",
-            IgniteHadoopFileSystem.class.getName());
-
-        cfg.setBoolean("fs.igfs.impl.disable.cache", true);
-
-        if (tcp)
-            cfg.setBoolean(String.format(PARAM_IGFS_ENDPOINT_NO_EMBED, authority), true);
-        else
-            cfg.setBoolean(String.format(PARAM_IGFS_ENDPOINT_NO_LOCAL_TCP, authority), true);
-
-        cfg.setBoolean(String.format(PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM, authority), true);
-
-        return cfg;
-    }
-}
\ No newline at end of file
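
Taken together, the valid/invalid cases above pin down the URI authority grammar: igfs://[igfsName][:gridName]@[host[:port]]/, where empty components fall back to defaults. A hedged sketch of a client connection using that grammar (configuration keys are those from the deleted configuration() helper; the endpoint values are illustrative):

    Configuration cfg = new Configuration();

    cfg.set("fs.igfs.impl", org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.class.getName());
    cfg.setBoolean("fs.igfs.impl.disable.cache", true);

    // Authority = [igfsName][:gridName]@[host[:port]].
    String authority = "igfs:grid@127.0.0.1:10500";

    FileSystem fs = FileSystem.get(new URI("igfs://" + authority + "/"), cfg);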

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemIpcCacheSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemIpcCacheSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemIpcCacheSelfTest.java
deleted file mode 100644
index 4d7a39e..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemIpcCacheSelfTest.java
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import java.lang.reflect.Field;
-import java.net.URI;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.ignite.cache.CacheWriteSynchronizationMode;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.configuration.FileSystemConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsIpcIo;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils;
-import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
-import org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint;
-import org.apache.ignite.internal.util.typedef.G;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
-
-import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
-import static org.apache.ignite.cache.CacheMode.PARTITIONED;
-import static org.apache.ignite.cache.CacheMode.REPLICATED;
-import static org.apache.ignite.events.EventType.EVT_JOB_MAPPED;
-import static org.apache.ignite.events.EventType.EVT_TASK_FAILED;
-import static org.apache.ignite.events.EventType.EVT_TASK_FINISHED;
-
-/**
- * IPC cache test.
- */
-public class IgniteHadoopFileSystemIpcCacheSelfTest extends IgfsCommonAbstractTest {
-    /** IP finder. */
-    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
-
-    /** Path to test hadoop configuration. */
-    private static final String HADOOP_FS_CFG = "modules/core/src/test/config/hadoop/core-site.xml";
-
-    /** Group size. */
-    public static final int GRP_SIZE = 128;
-
-    /** Started grid counter. */
-    private static int cnt;
-
-    /** {@inheritDoc} */
-    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
-        IgniteConfiguration cfg = super.getConfiguration(gridName);
-
-        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
-        discoSpi.setIpFinder(IP_FINDER);
-
-        cfg.setDiscoverySpi(discoSpi);
-
-        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
-
-        igfsCfg.setDataCacheName("partitioned");
-        igfsCfg.setMetaCacheName("replicated");
-        igfsCfg.setName("igfs");
-        igfsCfg.setManagementPort(FileSystemConfiguration.DFLT_MGMT_PORT + cnt);
-
-        IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
-
-        endpointCfg.setType(IgfsIpcEndpointType.SHMEM);
-        endpointCfg.setPort(IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + cnt);
-
-        igfsCfg.setIpcEndpointConfiguration(endpointCfg);
-
-        igfsCfg.setBlockSize(512 * 1024); // Together with the group blocks mapper this yields 64M groups per node.
-
-        cfg.setFileSystemConfiguration(igfsCfg);
-
-        cfg.setCacheConfiguration(cacheConfiguration());
-
-        cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED);
-
-        TcpCommunicationSpi commSpi = new TcpCommunicationSpi();
-
-        commSpi.setSharedMemoryPort(-1);
-
-        cfg.setCommunicationSpi(commSpi);
-
-        cnt++;
-
-        return cfg;
-    }
-
-    /**
-     * Gets cache configuration.
-     *
-     * @return Cache configuration.
-     */
-    private CacheConfiguration[] cacheConfiguration() {
-        CacheConfiguration cacheCfg = defaultCacheConfiguration();
-
-        cacheCfg.setName("partitioned");
-        cacheCfg.setCacheMode(PARTITIONED);
-        cacheCfg.setNearConfiguration(null);
-        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(GRP_SIZE));
-        cacheCfg.setBackups(0);
-        cacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
-
-        metaCacheCfg.setName("replicated");
-        metaCacheCfg.setCacheMode(REPLICATED);
-        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        return new CacheConfiguration[] {metaCacheCfg, cacheCfg};
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTestsStarted() throws Exception {
-        startGrids(4);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTestsStopped() throws Exception {
-        G.stopAll(true);
-    }
-
-    /**
-     * Test how IPC cache map works.
-     *
-     * @throws Exception If failed.
-     */
-    @SuppressWarnings("unchecked")
-    public void testIpcCache() throws Exception {
-        Field cacheField = HadoopIgfsIpcIo.class.getDeclaredField("ipcCache");
-
-        cacheField.setAccessible(true);
-
-        Field activeCntField = HadoopIgfsIpcIo.class.getDeclaredField("activeCnt");
-
-        activeCntField.setAccessible(true);
-
-        Map<String, HadoopIgfsIpcIo> cache = (Map<String, HadoopIgfsIpcIo>)cacheField.get(null);
-
-        cache.clear(); // Avoid influence of previous tests in the same process.
-
-        String name = "igfs:" + getTestGridName(0) + "@";
-
-        Configuration cfg = new Configuration();
-
-        cfg.addResource(U.resolveIgniteUrl(HADOOP_FS_CFG));
-        cfg.setBoolean("fs.igfs.impl.disable.cache", true);
-        cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_EMBED, name), true);
-
-        // Ensure that existing IO is reused.
-        FileSystem fs1 = FileSystem.get(new URI("igfs://" + name + "/"), cfg);
-
-        assertEquals(1, cache.size());
-
-        HadoopIgfsIpcIo io = null;
-
-        System.out.println("CACHE: " + cache);
-
-        for (String key : cache.keySet()) {
-            if (key.contains("10500")) {
-                io = cache.get(key);
-
-                break;
-            }
-        }
-
-        assert io != null;
-
-        assertEquals(1, ((AtomicInteger)activeCntField.get(io)).get());
-
-        // Ensure that when IO is used by multiple file systems and one of them is closed, IO is not stopped.
-        FileSystem fs2 = FileSystem.get(new URI("igfs://" + name + "/abc"), cfg);
-
-        assertEquals(1, cache.size());
-        assertEquals(2, ((AtomicInteger)activeCntField.get(io)).get());
-
-        fs2.close();
-
-        assertEquals(1, cache.size());
-        assertEquals(1, ((AtomicInteger)activeCntField.get(io)).get());
-
-        Field stopField = HadoopIgfsIpcIo.class.getDeclaredField("stopping");
-
-        stopField.setAccessible(true);
-
-        assert !(Boolean)stopField.get(io);
-
-        // Ensure that IO is stopped when nobody needs it anymore.
-        fs1.close();
-
-        assert cache.isEmpty();
-
-        assert (Boolean)stopField.get(io);
-    }
-}
\ No newline at end of file
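
In short, the contract verified above: every FileSystem pointing at the same endpoint shares one cached HadoopIgfsIpcIo, reference-counted via activeCnt, and the IO is stopped and evicted only when the last user closes. A hedged sketch of the lifecycle (name and cfg as in the deleted test):

    FileSystem fs1 = FileSystem.get(new URI("igfs://" + name + "/"), cfg);    // activeCnt == 1.
    FileSystem fs2 = FileSystem.get(new URI("igfs://" + name + "/abc"), cfg); // Same IO reused, activeCnt == 2.

    fs2.close(); // IO survives: activeCnt drops back to 1.
    fs1.close(); // Last user gone: IO is stopped and removed from the cache.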

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerSelfTest.java
deleted file mode 100644
index 3013311..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerSelfTest.java
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.apache.ignite.internal.igfs.common.IgfsLogger;
-import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
-import org.apache.ignite.internal.util.typedef.internal.SB;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FilenameFilter;
-import java.io.InputStreamReader;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
-import static org.apache.ignite.internal.igfs.common.IgfsLogger.DELIM_FIELD;
-import static org.apache.ignite.internal.igfs.common.IgfsLogger.DELIM_FIELD_VAL;
-import static org.apache.ignite.internal.igfs.common.IgfsLogger.HDR;
-import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_CLOSE_IN;
-import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_CLOSE_OUT;
-import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_DELETE;
-import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_DIR_LIST;
-import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_DIR_MAKE;
-import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_MARK;
-import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_OPEN_IN;
-import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_OPEN_OUT;
-import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_RANDOM_READ;
-import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_RENAME;
-import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_RESET;
-import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_SEEK;
-import static org.apache.ignite.internal.igfs.common.IgfsLogger.TYPE_SKIP;
-
-/**
- * Grid IGFS client logger test.
- */
-public class IgniteHadoopFileSystemLoggerSelfTest extends IgfsCommonAbstractTest {
-    /** Path string. */
-    private static final String PATH_STR = "/dir1/dir2/file;test";
-
-    /** Path string with escaped semicolons. */
-    private static final String PATH_STR_ESCAPED = PATH_STR.replace(';', '~');
-
-    /** Path. */
-    private static final IgfsPath PATH = new IgfsPath(PATH_STR);
-
-    /** IGFS name. */
-    private static final String IGFS_NAME = "igfs";
-
-    /** Log file path. */
-    private static final String LOG_DIR = U.getIgniteHome();
-
-    /** Endpoint address. */
-    private static final String ENDPOINT = "localhost:10500";
-
-    /** Log file name. */
-    private static final String LOG_FILE = LOG_DIR + File.separator + "igfs-log-" + IGFS_NAME + "-" + U.jvmPid() +
-        ".csv";
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTestsStarted() throws Exception {
-        removeLogs();
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTest() throws Exception {
-        removeLogs();
-    }
-
-    /**
-     * Remove existing logs.
-     *
-     * @throws Exception If failed.
-     */
-    private void removeLogs() throws Exception {
-        File dir = new File(LOG_DIR);
-
-        File[] logs = dir.listFiles(new FilenameFilter() {
-            @Override public boolean accept(File dir, String name) {
-                return name.startsWith("igfs-log-");
-            }
-        });
-
-        for (File log : logs)
-            log.delete();
-    }
-
-    /**
-     * Ensure correct creation/removal of static loggers as well as log file creation.
-     *
-     * @throws Exception If failed.
-     */
-    public void testCreateDelete() throws Exception {
-        IgfsLogger log = IgfsLogger.logger(ENDPOINT, IGFS_NAME, LOG_DIR, 10);
-
-        IgfsLogger sameLog0 = IgfsLogger.logger(ENDPOINT, IGFS_NAME, LOG_DIR, 10);
-
-        // Loggers for the same endpoint must be the same object.
-        assert log == sameLog0;
-
-        IgfsLogger otherLog = IgfsLogger.logger("other" + ENDPOINT, IGFS_NAME, LOG_DIR, 10);
-
-        // Logger for another endpoint must be different.
-        assert log != otherLog;
-
-        otherLog.close();
-
-        log.logDelete(PATH, PRIMARY, false);
-
-        log.close();
-
-        File logFile = new File(LOG_FILE);
-
-        // When there are multiple loggers, closing one must not force flushing.
-        assert !logFile.exists();
-
-        IgfsLogger sameLog1 = IgfsLogger.logger(ENDPOINT, IGFS_NAME, LOG_DIR, 10);
-
-        assert sameLog0 == sameLog1;
-
-        sameLog0.close();
-
-        assert !logFile.exists();
-
-        sameLog1.close();
-
-        // When we close the last logger, it must flush data to disk.
-        assert logFile.exists();
-
-        logFile.delete();
-
-        IgfsLogger sameLog2 = IgfsLogger.logger(ENDPOINT, IGFS_NAME, LOG_DIR, 10);
-
-        // This time we expect a new logger instance to be created.
-        assert sameLog0 != sameLog2;
-
-        sameLog2.close();
-
-        // As we do not add any records to the logger, we do not expect flushing.
-        assert !logFile.exists();
-    }
-
-    /**
-     * Test read operations logging.
-     *
-     * @throws Exception If failed.
-     */
-    public void testLogRead() throws Exception {
-        IgfsLogger log = IgfsLogger.logger(ENDPOINT, IGFS_NAME, LOG_DIR, 10);
-
-        log.logOpen(1, PATH, PRIMARY, 2, 3L);
-        log.logRandomRead(1, 4L, 5);
-        log.logSeek(1, 6L);
-        log.logSkip(1, 7L);
-        log.logMark(1, 8L);
-        log.logReset(1);
-        log.logCloseIn(1, 9L, 10L, 11);
-
-        log.close();
-
-        checkLog(
-            new SB().a(U.jvmPid() + d() + TYPE_OPEN_IN + d() + PATH_STR_ESCAPED + d() + PRIMARY + d() + 1 + d() + 2 +
-                d() + 3 + d(14)).toString(),
-            new SB().a(U.jvmPid() + d() + TYPE_RANDOM_READ + d(3) + 1 + d(7) + 4 + d() + 5 + d(8)).toString(),
-            new SB().a(U.jvmPid() + d() + TYPE_SEEK + d(3) + 1 + d(7) + 6 + d(9)).toString(),
-            new SB().a(U.jvmPid() + d() + TYPE_SKIP + d(3) + 1 + d(9) + 7 + d(7)).toString(),
-            new SB().a(U.jvmPid() + d() + TYPE_MARK + d(3) + 1 + d(10) + 8 + d(6)).toString(),
-            new SB().a(U.jvmPid() + d() + TYPE_RESET + d(3) + 1 + d(16)).toString(),
-            new SB().a(U.jvmPid() + d() + TYPE_CLOSE_IN + d(3) + 1 + d(11) + 9 + d() + 10 + d() + 11 + d(3)).toString()
-        );
-    }
-
-    /**
-     * Test write operations logging.
-     *
-     * @throws Exception If failed.
-     */
-    public void testLogWrite() throws Exception {
-        IgfsLogger log = IgfsLogger.logger(ENDPOINT, IGFS_NAME, LOG_DIR, 10);
-
-        log.logCreate(1, PATH, PRIMARY, true, 2, (short)3, 4L);
-        log.logAppend(2, PATH, PRIMARY, 8);
-        log.logCloseOut(2, 9L, 10L, 11);
-
-        log.close();
-
-        checkLog(
-            new SB().a(U.jvmPid() + d() + TYPE_OPEN_OUT + d() + PATH_STR_ESCAPED + d() + PRIMARY + d() + 1 + d() +
-                2 + d(2) + 0 + d() + 1 + d() + 3 + d() + 4 + d(10)).toString(),
-            new SB().a(U.jvmPid() + d() + TYPE_OPEN_OUT + d() + PATH_STR_ESCAPED + d() + PRIMARY + d() + 2 + d() +
-                8 + d(2) + 1 + d(13)).toString(),
-            new SB().a(U.jvmPid() + d() + TYPE_CLOSE_OUT + d(3) + 2 + d(11) + 9 + d() + 10 + d() + 11 + d(3))
-                .toString()
-        );
-    }
-
-    /**
-     * Test miscellaneous operations logging.
-     *
-     * @throws Exception If failed.
-     */
-    @SuppressWarnings("TooBroadScope")
-    public void testLogMisc() throws Exception {
-        IgfsLogger log = IgfsLogger.logger(ENDPOINT, IGFS_NAME, LOG_DIR, 10);
-
-        String newFile = "/dir3/file.test";
-        String file1 = "/dir3/file1.test";
-        String file2 = "/dir3/file2.test";
-
-        log.logMakeDirectory(PATH, PRIMARY);
-        log.logRename(PATH, PRIMARY, new IgfsPath(newFile));
-        log.logListDirectory(PATH, PRIMARY, new String[] { file1, file2 });
-        log.logDelete(PATH, PRIMARY, false);
-
-        log.close();
-
-        checkLog(
-            new SB().a(U.jvmPid() + d() + TYPE_DIR_MAKE + d() + PATH_STR_ESCAPED + d() + PRIMARY + d(17)).toString(),
-            new SB().a(U.jvmPid() + d() + TYPE_RENAME + d() + PATH_STR_ESCAPED + d() + PRIMARY + d(15) + newFile +
-                d(2)).toString(),
-            new SB().a(U.jvmPid() + d() + TYPE_DIR_LIST + d() + PATH_STR_ESCAPED + d() + PRIMARY + d(17) + file1 +
-                DELIM_FIELD_VAL + file2).toString(),
-            new SB().a(U.jvmPid() + d() + TYPE_DELETE + d(1) + PATH_STR_ESCAPED + d() + PRIMARY + d(16) + 0 +
-                d()).toString()
-        );
-    }
-
-    /**
-     * Ensure that log file has only the following lines.
-     *
-     * @param lines Expected lines.
-     */
-    private void checkLog(String... lines) throws Exception {
-        BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(LOG_FILE)));
-
-        List<String> logLines = new ArrayList<>(lines.length);
-
-        String nextLogLine;
-
-        while ((nextLogLine = br.readLine()) != null)
-            logLines.add(nextLogLine);
-
-        U.closeQuiet(br);
-
-        assertEquals(lines.length + 1, logLines.size());
-
-        assertEquals(HDR, logLines.get(0));
-
-        for (int i = 0; i < lines.length; i++) {
-            String logLine = logLines.get(i + 1);
-
-            logLine = logLine.substring(logLine.indexOf(DELIM_FIELD, logLine.indexOf(DELIM_FIELD) + 1) + 1);
-
-            assertEquals(lines[i], logLine);
-        }
-    }
-
-    /**
-     * Return single field delimiter.
-     *
-     * @return Single field delimiter.
-     */
-    private String d() {
-        return d(1);
-    }
-
-    /**
-     * Return the given number of field delimiters.
-     *
-     * @param cnt Number of field delimiters.
-     * @return Field delimiters.
-     */
-    private String d(int cnt) {
-        SB buf = new SB();
-
-        for (int i = 0; i < cnt; i++)
-            buf.a(DELIM_FIELD);
-
-        return buf.toString();
-    }
-}
\ No newline at end of file
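
To decode the assertions above: every logged operation becomes one line of DELIM_FIELD-separated columns containing the JVM PID, a record type constant and the operation's arguments; semicolons inside paths are escaped to '~' and unused columns stay empty. A hedged usage sketch (endpoint and path are the test's own values):

    IgfsLogger log = IgfsLogger.logger("localhost:10500", "igfs", U.getIgniteHome(), 10);

    // Appends a record with the PID, TYPE_DELETE, the escaped path
    // (/dir1/dir2/file~test) and the PRIMARY mode.
    log.logDelete(new IgfsPath("/dir1/dir2/file;test"), IgfsMode.PRIMARY, false);

    log.close(); // Closing the last logger for an endpoint flushes the CSV file.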

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerStateSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerStateSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerStateSelfTest.java
deleted file mode 100644
index 1bd5b41..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerStateSelfTest.java
+++ /dev/null
@@ -1,329 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import java.lang.reflect.Field;
-import java.net.URI;
-import java.nio.file.Paths;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.cache.CacheWriteSynchronizationMode;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.configuration.FileSystemConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
-import org.apache.ignite.internal.igfs.common.IgfsLogger;
-import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
-import org.apache.ignite.internal.processors.igfs.IgfsEx;
-import org.apache.ignite.internal.util.typedef.G;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
-
-import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
-import static org.apache.ignite.cache.CacheMode.PARTITIONED;
-import static org.apache.ignite.cache.CacheMode.REPLICATED;
-import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_LOG_DIR;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_LOG_ENABLED;
-
-/**
- * Ensures that sampling is really turned on/off.
- */
-public class IgniteHadoopFileSystemLoggerStateSelfTest extends IgfsCommonAbstractTest {
-    /** IGFS. */
-    private IgfsEx igfs;
-
-    /** File system. */
-    private FileSystem fs;
-
-    /** Whether logging is enabled in FS configuration. */
-    private boolean logging;
-
-    /** Whether sampling is enabled. */
-    private Boolean sampling;
-
-    /** {@inheritDoc} */
-    @Override protected void afterTest() throws Exception {
-        U.closeQuiet(fs);
-
-        igfs = null;
-        fs = null;
-
-        G.stopAll(true);
-
-        logging = false;
-        sampling = null;
-    }
-
-    /**
-     * Startup the grid and instantiate the file system.
-     *
-     * @throws Exception If failed.
-     */
-    private void startUp() throws Exception {
-        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
-
-        igfsCfg.setDataCacheName("partitioned");
-        igfsCfg.setMetaCacheName("replicated");
-        igfsCfg.setName("igfs");
-        igfsCfg.setBlockSize(512 * 1024);
-        igfsCfg.setDefaultMode(PRIMARY);
-
-        IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
-
-        endpointCfg.setType(IgfsIpcEndpointType.TCP);
-        endpointCfg.setPort(10500);
-
-        igfsCfg.setIpcEndpointConfiguration(endpointCfg);
-
-        CacheConfiguration cacheCfg = defaultCacheConfiguration();
-
-        cacheCfg.setName("partitioned");
-        cacheCfg.setCacheMode(PARTITIONED);
-        cacheCfg.setNearConfiguration(null);
-        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(128));
-        cacheCfg.setBackups(0);
-        cacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
-
-        metaCacheCfg.setName("replicated");
-        metaCacheCfg.setCacheMode(REPLICATED);
-        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        IgniteConfiguration cfg = new IgniteConfiguration();
-
-        cfg.setGridName("igfs-grid");
-
-        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
-
-        discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
-
-        cfg.setDiscoverySpi(discoSpi);
-        cfg.setCacheConfiguration(metaCacheCfg, cacheCfg);
-        cfg.setFileSystemConfiguration(igfsCfg);
-
-        cfg.setLocalHost("127.0.0.1");
-        cfg.setConnectorConfiguration(null);
-
-        Ignite g = G.start(cfg);
-
-        igfs = (IgfsEx)g.fileSystem("igfs");
-
-        igfs.globalSampling(sampling);
-
-        fs = fileSystem();
-    }
-
-    /**
-     * When logging is disabled and sampling is not set, the no-op logger must be used.
-     *
-     * @throws Exception If failed.
-     */
-    public void testLoggingDisabledSamplingNotSet() throws Exception {
-        startUp();
-
-        assert !logEnabled();
-    }
-
-    /**
-     * When logging is enabled and sampling is not set, the file logger must be used.
-     *
-     * @throws Exception If failed.
-     */
-    public void testLoggingEnabledSamplingNotSet() throws Exception {
-        logging = true;
-
-        startUp();
-
-        assert logEnabled();
-    }
-
-    /**
-     * When logging is disabled and sampling is disabled, the no-op logger must be used.
-     *
-     * @throws Exception If failed.
-     */
-    public void testLoggingDisabledSamplingDisabled() throws Exception {
-        sampling = false;
-
-        startUp();
-
-        assert !logEnabled();
-    }
-
-    /**
-     * When logging is enabled and sampling is disabled, the no-op logger must be used.
-     *
-     * @throws Exception If failed.
-     */
-    public void testLoggingEnabledSamplingDisabled() throws Exception {
-        logging = true;
-        sampling = false;
-
-        startUp();
-
-        assert !logEnabled();
-    }
-
-    /**
-     * When logging is disabled and sampling is enabled, the file logger must be used.
-     *
-     * @throws Exception If failed.
-     */
-    public void testLoggingDisabledSamplingEnabled() throws Exception {
-        sampling = true;
-
-        startUp();
-
-        assert logEnabled();
-    }
-
-    /**
-     * When logging is enabled and sampling is enabled, the file logger must be used.
-     *
-     * @throws Exception If failed.
-     */
-    public void testLoggingEnabledSamplingEnabled() throws Exception {
-        logging = true;
-        sampling = true;
-
-        startUp();
-
-        assert logEnabled();
-    }
-
-    /**
-     * Ensure that sampling changes made through the API affect logging on subsequent client connections.
-     *
-     * @throws Exception If failed.
-     */
-    public void testSamplingChange() throws Exception {
-        // Start with sampling not set.
-        startUp();
-
-        assert !logEnabled();
-
-        fs.close();
-
-        // "Not set" => true transition.
-        igfs.globalSampling(true);
-
-        fs = fileSystem();
-
-        assert logEnabled();
-
-        fs.close();
-
-        // True => "not set" transition.
-        igfs.globalSampling(null);
-
-        fs = fileSystem();
-
-        assert !logEnabled();
-
-        // "Not-set" => false transition.
-        igfs.globalSampling(false);
-
-        fs = fileSystem();
-
-        assert !logEnabled();
-
-        fs.close();
-
-        // False => "not=set" transition.
-        igfs.globalSampling(null);
-
-        fs = fileSystem();
-
-        assert !logEnabled();
-
-        fs.close();
-
-        // True => false transition.
-        igfs.globalSampling(true);
-        igfs.globalSampling(false);
-
-        fs = fileSystem();
-
-        assert !logEnabled();
-
-        fs.close();
-
-        // False => true transition.
-        igfs.globalSampling(true);
-
-        fs = fileSystem();
-
-        assert logEnabled();
-    }
-
-    /**
-     * Ensure that the log directory is propagated to IGFS when a client file system connects.
-     *
-     * @throws Exception If failed.
-     */
-    @SuppressWarnings("ConstantConditions")
-    public void testLogDirectory() throws Exception {
-        startUp();
-
-        assertEquals(Paths.get(U.getIgniteHome()).normalize().toString(),
-            igfs.clientLogDirectory());
-    }
-
-    /**
-     * Instantiate new file system.
-     *
-     * @return New file system.
-     * @throws Exception If failed.
-     */
-    private IgniteHadoopFileSystem fileSystem() throws Exception {
-        Configuration fsCfg = new Configuration();
-
-        fsCfg.addResource(U.resolveIgniteUrl("modules/core/src/test/config/hadoop/core-site-loopback.xml"));
-
-        fsCfg.setBoolean("fs.igfs.impl.disable.cache", true);
-
-        if (logging)
-            fsCfg.setBoolean(String.format(PARAM_IGFS_LOG_ENABLED, "igfs:igfs-grid@"), logging);
-
-        fsCfg.setStrings(String.format(PARAM_IGFS_LOG_DIR, "igfs:igfs-grid@"), U.getIgniteHome());
-
-        return (IgniteHadoopFileSystem)FileSystem.get(new URI("igfs://igfs:igfs-grid@/"), fsCfg);
-    }
-
-    /**
-     * Check whether a real logger is used by the file system.
-     *
-     * @return {@code True} if a real (file-based) logger is enabled.
-     * @throws Exception If failed.
-     */
-    private boolean logEnabled() throws Exception {
-        assert fs != null;
-
-        Field field = fs.getClass().getDeclaredField("clientLog");
-
-        field.setAccessible(true);
-
-        return ((IgfsLogger)field.get(fs)).isLogEnabled();
-    }
-}
\ No newline at end of file
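
The six logging/sampling state tests above reduce to a single rule: an explicitly set global sampling flag always wins, otherwise the client's PARAM_IGFS_LOG_ENABLED setting decides. A minimal sketch of that rule (the method name is hypothetical):

    // Effective logging decision, as enumerated by the tests above:
    // sampling == null  -> follow the client's PARAM_IGFS_LOG_ENABLED setting;
    // sampling == TRUE  -> log regardless of the client setting;
    // sampling == FALSE -> never log.
    static boolean effectiveLogging(Boolean sampling, boolean clientLoggingEnabled) {
        return sampling != null ? sampling : clientLoggingEnabled;
    }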

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackAbstractSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackAbstractSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackAbstractSelfTest.java
deleted file mode 100644
index 6ed2249..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackAbstractSelfTest.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT;
-
-/**
- * IGFS Hadoop file system IPC loopback self test.
- */
-public abstract class IgniteHadoopFileSystemLoopbackAbstractSelfTest extends
-    IgniteHadoopFileSystemAbstractSelfTest {
-    /**
-     * Constructor.
-     *
-     * @param mode IGFS mode.
-     * @param skipEmbed Skip embedded mode flag.
-     */
-    protected IgniteHadoopFileSystemLoopbackAbstractSelfTest(IgfsMode mode, boolean skipEmbed) {
-        super(mode, skipEmbed, true);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected IgfsIpcEndpointConfiguration primaryIpcEndpointConfiguration(final String gridName) {
-        IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
-
-        endpointCfg.setType(IgfsIpcEndpointType.TCP);
-        endpointCfg.setPort(DFLT_IPC_PORT + getTestGridIndex(gridName));
-
-        return endpointCfg;
-    }
-}
\ No newline at end of file
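
The concrete loopback suites that follow only pin the abstract test's two constructor parameters: the IGFS mode under test and the skip-embedded flag (the Embedded* suites pass false, the External* suites pass true). A hedged template with a hypothetical class name:

    public class ExampleLoopbackSelfTest extends IgniteHadoopFileSystemLoopbackAbstractSelfTest {
        public ExampleLoopbackSelfTest() {
            super(IgfsMode.DUAL_ASYNC, false); // Mode under test; false = embedded connections also exercised.
        }
    }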

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest.java
deleted file mode 100644
index f1edb28..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.igfs.IgfsMode.DUAL_ASYNC;
-
-/**
- * IGFS Hadoop file system IPC loopback self test in DUAL_ASYNC mode.
- */
-public class IgniteHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest extends
-    IgniteHadoopFileSystemLoopbackAbstractSelfTest {
-    /**
-     * Constructor.
-     */
-    public IgniteHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest() {
-        super(DUAL_ASYNC, false);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest.java
deleted file mode 100644
index 97a6991..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.igfs.IgfsMode.DUAL_SYNC;
-
-/**
- * IGFS Hadoop file system IPC loopback self test in DUAL_SYNC mode.
- */
-public class IgniteHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest
-    extends IgniteHadoopFileSystemLoopbackAbstractSelfTest {
-    /**
-     * Constructor.
-     */
-    public IgniteHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest() {
-        super(DUAL_SYNC, false);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedPrimarySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedPrimarySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedPrimarySelfTest.java
deleted file mode 100644
index f9ecc4b..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedPrimarySelfTest.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
-
-/**
- * IGFS Hadoop file system IPC loopback self test in PRIMARY mode.
- */
-public class IgniteHadoopFileSystemLoopbackEmbeddedPrimarySelfTest
-    extends IgniteHadoopFileSystemLoopbackAbstractSelfTest {
-    /**
-     * Constructor.
-     */
-    public IgniteHadoopFileSystemLoopbackEmbeddedPrimarySelfTest() {
-        super(PRIMARY, false);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedSecondarySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedSecondarySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedSecondarySelfTest.java
deleted file mode 100644
index 719df6d..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedSecondarySelfTest.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.igfs.IgfsMode.PROXY;
-
-/**
- * IGFS Hadoop file system IPC loopback self test in PROXY (secondary file system) mode.
- */
-public class IgniteHadoopFileSystemLoopbackEmbeddedSecondarySelfTest extends
-    IgniteHadoopFileSystemLoopbackAbstractSelfTest {
-
-    /**
-     * Constructor.
-     */
-    public IgniteHadoopFileSystemLoopbackEmbeddedSecondarySelfTest() {
-        super(PROXY, false);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualAsyncSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualAsyncSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualAsyncSelfTest.java
deleted file mode 100644
index 764624d..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualAsyncSelfTest.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.igfs.IgfsMode.DUAL_ASYNC;
-
-/**
- * IGFS Hadoop file system IPC loopback self test in DUAL_ASYNC mode.
- */
-public class IgniteHadoopFileSystemLoopbackExternalDualAsyncSelfTest extends
-    IgniteHadoopFileSystemLoopbackAbstractSelfTest {
-    /**
-     * Constructor.
-     */
-    public IgniteHadoopFileSystemLoopbackExternalDualAsyncSelfTest() {
-        super(DUAL_ASYNC, true);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest.java
deleted file mode 100644
index 21a248a..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.igfs.IgfsMode.DUAL_SYNC;
-
-/**
- * IGFS Hadoop file system IPC loopback self test in DUAL_SYNC mode.
- */
-public class IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest
-    extends IgniteHadoopFileSystemLoopbackAbstractSelfTest {
-    /**
-     * Constructor.
-     */
-    public IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest() {
-        super(DUAL_SYNC, true);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest.java
deleted file mode 100644
index 092c7a5..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
-
-/**
- * IGFS Hadoop file system IPC loopback self test in PRIMARY mode.
- */
-public class IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest
-    extends IgniteHadoopFileSystemLoopbackAbstractSelfTest {
-    /**
-     * Constructor.
-     */
-    public IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest() {
-        super(PRIMARY, true);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest.java
deleted file mode 100644
index 9f7d21b..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.igfs.IgfsMode.PROXY;
-
-/**
- * IGFS Hadoop file system IPC loopback self test in SECONDARY mode.
- */
-public class IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest extends
-    IgniteHadoopFileSystemLoopbackAbstractSelfTest {
-
-    /**
-     * Constructor.
-     */
-    public IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest() {
-        super(PROXY, true);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemSecondaryFileSystemInitializationSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemSecondaryFileSystemInitializationSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemSecondaryFileSystemInitializationSelfTest.java
deleted file mode 100644
index 1b48870..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemSecondaryFileSystemInitializationSelfTest.java
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.ignite.cache.CacheWriteSynchronizationMode;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.configuration.FileSystemConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
-import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
-import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
-import org.apache.ignite.internal.util.typedef.G;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
-
-import java.net.URI;
-
-import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
-import static org.apache.ignite.cache.CacheMode.PARTITIONED;
-import static org.apache.ignite.cache.CacheMode.REPLICATED;
-import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
-
-/**
- * Ensures correct mode resolution for SECONDARY paths.
- */
-public class IgniteHadoopFileSystemSecondaryFileSystemInitializationSelfTest extends IgfsCommonAbstractTest {
-    /** File system. */
-    private IgniteHadoopFileSystem fs;
-
-    /** {@inheritDoc} */
-    @Override protected void afterTest() throws Exception {
-        U.closeQuiet(fs);
-
-        fs = null;
-
-        G.stopAll(true);
-    }
-
-    /**
-     * Perform initial startup.
-     *
-     * @param initDfltPathModes Whether to initialize default path modes.
-     * @throws Exception If failed.
-     */
-    @SuppressWarnings({"NullableProblems", "unchecked"})
-    private void startUp(boolean initDfltPathModes) throws Exception {
-        startUpSecondary();
-
-        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
-
-        igfsCfg.setDataCacheName("partitioned");
-        igfsCfg.setMetaCacheName("replicated");
-        igfsCfg.setName("igfs");
-        igfsCfg.setBlockSize(512 * 1024);
-        igfsCfg.setInitializeDefaultPathModes(initDfltPathModes);
-
-        IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
-
-        endpointCfg.setType(IgfsIpcEndpointType.TCP);
-        endpointCfg.setPort(10500);
-
-        igfsCfg.setIpcEndpointConfiguration(endpointCfg);
-
-        igfsCfg.setManagementPort(-1);
-        igfsCfg.setSecondaryFileSystem(new IgniteHadoopIgfsSecondaryFileSystem(
-            "igfs://igfs-secondary:igfs-grid-secondary@127.0.0.1:11500/",
-            "modules/core/src/test/config/hadoop/core-site-loopback-secondary.xml"));
-
-        CacheConfiguration cacheCfg = defaultCacheConfiguration();
-
-        cacheCfg.setName("partitioned");
-        cacheCfg.setCacheMode(PARTITIONED);
-        cacheCfg.setNearConfiguration(null);
-        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(128));
-        cacheCfg.setBackups(0);
-        cacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
-
-        metaCacheCfg.setName("replicated");
-        metaCacheCfg.setCacheMode(REPLICATED);
-        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        IgniteConfiguration cfg = new IgniteConfiguration();
-
-        cfg.setGridName("igfs-grid");
-
-        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
-
-        discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
-
-        cfg.setDiscoverySpi(discoSpi);
-        cfg.setCacheConfiguration(metaCacheCfg, cacheCfg);
-        cfg.setFileSystemConfiguration(igfsCfg);
-
-        cfg.setLocalHost("127.0.0.1");
-
-        G.start(cfg);
-
-        Configuration fsCfg = new Configuration();
-
-        fsCfg.addResource(U.resolveIgniteUrl("modules/core/src/test/config/hadoop/core-site-loopback.xml"));
-
-        fsCfg.setBoolean("fs.igfs.impl.disable.cache", true);
-
-        fs = (IgniteHadoopFileSystem)FileSystem.get(new URI("igfs://igfs:igfs-grid@/"), fsCfg);
-    }
-
-    /**
-     * Starts up the secondary file system.
-     *
-     * @throws Exception If failed.
-     */
-    @SuppressWarnings("unchecked")
-    private void startUpSecondary() throws Exception {
-        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
-
-        igfsCfg.setDataCacheName("partitioned");
-        igfsCfg.setMetaCacheName("replicated");
-        igfsCfg.setName("igfs-secondary");
-        igfsCfg.setBlockSize(512 * 1024);
-        igfsCfg.setDefaultMode(PRIMARY);
-
-        IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
-
-        endpointCfg.setType(IgfsIpcEndpointType.TCP);
-        endpointCfg.setPort(11500);
-
-        igfsCfg.setIpcEndpointConfiguration(endpointCfg);
-
-        CacheConfiguration cacheCfg = defaultCacheConfiguration();
-
-        cacheCfg.setName("partitioned");
-        cacheCfg.setCacheMode(PARTITIONED);
-        cacheCfg.setNearConfiguration(null);
-        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(128));
-        cacheCfg.setBackups(0);
-        cacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
-
-        metaCacheCfg.setName("replicated");
-        metaCacheCfg.setCacheMode(REPLICATED);
-        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        IgniteConfiguration cfg = new IgniteConfiguration();
-
-        cfg.setGridName("igfs-grid-secondary");
-
-        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
-
-        discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
-
-        cfg.setDiscoverySpi(discoSpi);
-        cfg.setCacheConfiguration(metaCacheCfg, cacheCfg);
-        cfg.setFileSystemConfiguration(igfsCfg);
-
-        cfg.setLocalHost("127.0.0.1");
-
-        G.start(cfg);
-    }
-
-    /**
-     * Test scenario when defaults are initialized.
-     *
-     * @throws Exception If failed.
-     */
-    public void testDefaultsInitialized() throws Exception {
-        check(true);
-    }
-
-    /**
-     * Test scenario when defaults are not initialized.
-     *
-     * @throws Exception If failed.
-     */
-    public void testDefaultsNotInitialized() throws Exception {
-        check(false);
-    }
-
-    /**
-     * Actual check.
-     *
-     * @param initDfltPathModes Whether to initialize default path modes.
-     * @throws Exception If failed.
-     */
-    private void check(boolean initDfltPathModes) throws Exception {
-        startUp(initDfltPathModes);
-
-        assertEquals(initDfltPathModes, fs.hasSecondaryFileSystem());
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemAbstractSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemAbstractSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemAbstractSelfTest.java
deleted file mode 100644
index d8cf74c..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemAbstractSelfTest.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import java.util.Collection;
-import java.util.LinkedList;
-import java.util.concurrent.Callable;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.util.ipc.IpcEndpoint;
-import org.apache.ignite.internal.util.ipc.IpcEndpointFactory;
-import org.apache.ignite.internal.util.typedef.X;
-import org.apache.ignite.testframework.GridTestUtils;
-
-import static org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT;
-
-/**
- * IGFS Hadoop file system IPC self test.
- */
-public abstract class IgniteHadoopFileSystemShmemAbstractSelfTest extends IgniteHadoopFileSystemAbstractSelfTest {
-    /**
-     * Constructor.
-     *
-     * @param mode IGFS mode.
-     * @param skipEmbed Skip embedded mode flag.
-     */
-    protected IgniteHadoopFileSystemShmemAbstractSelfTest(IgfsMode mode, boolean skipEmbed) {
-        super(mode, skipEmbed, false);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected IgfsIpcEndpointConfiguration primaryIpcEndpointConfiguration(final String gridName) {
-        IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
-
-        endpointCfg.setType(IgfsIpcEndpointType.SHMEM);
-        endpointCfg.setPort(DFLT_IPC_PORT + getTestGridIndex(gridName));
-
-        return endpointCfg;
-    }
-
-    /**
-     * Checks correct behaviour when the system runs out of resources.
-     *
-     * @throws Exception If error occurred.
-     */
-    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
-    public void testOutOfResources() throws Exception {
-        final Collection<IpcEndpoint> eps = new LinkedList<>();
-
-        try {
-            IgniteCheckedException e = (IgniteCheckedException)GridTestUtils.assertThrows(log, new Callable<Object>() {
-                @SuppressWarnings("InfiniteLoopStatement")
-                @Override public Object call() throws Exception {
-                    while (true) {
-                        IpcEndpoint ep = IpcEndpointFactory.connectEndpoint("shmem:10500", log);
-
-                        eps.add(ep);
-                    }
-                }
-            }, IgniteCheckedException.class, null);
-
-            assertNotNull(e);
-
-            String msg = e.getMessage();
-
-            assertTrue("Invalid exception: " + X.getFullStackTrace(e),
-                msg.contains("(error code: 28)") ||
-                msg.contains("(error code: 24)") ||
-                msg.contains("(error code: 12)"));
-        }
-        finally {
-            for (IpcEndpoint ep : eps)
-                ep.close();
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualAsyncSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualAsyncSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualAsyncSelfTest.java
deleted file mode 100644
index d0d570f..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualAsyncSelfTest.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.igfs.IgfsMode.DUAL_ASYNC;
-
-/**
- * IGFS Hadoop file system IPC shmem self test in DUAL_ASYNC mode.
- */
-public class IgniteHadoopFileSystemShmemEmbeddedDualAsyncSelfTest
-    extends IgniteHadoopFileSystemShmemAbstractSelfTest {
-    /**
-     * Constructor.
-     */
-    public IgniteHadoopFileSystemShmemEmbeddedDualAsyncSelfTest() {
-        super(DUAL_ASYNC, false);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualSyncSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualSyncSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualSyncSelfTest.java
deleted file mode 100644
index 2e5b015..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualSyncSelfTest.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.igfs.IgfsMode.DUAL_SYNC;
-
-/**
- * IGFS Hadoop file system IPC shmem self test in DUAL_SYNC mode.
- */
-public class IgniteHadoopFileSystemShmemEmbeddedDualSyncSelfTest
-    extends IgniteHadoopFileSystemShmemAbstractSelfTest {
-    /**
-     * Constructor.
-     */
-    public IgniteHadoopFileSystemShmemEmbeddedDualSyncSelfTest() {
-        super(DUAL_SYNC, false);
-    }
-}
\ No newline at end of file


[36/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemAbstractSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemAbstractSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemAbstractSelfTest.java
new file mode 100644
index 0000000..93a924c
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemAbstractSelfTest.java
@@ -0,0 +1,2040 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import java.io.BufferedOutputStream;
+import java.io.Closeable;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.URI;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayDeque;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Deque;
+import java.util.EnumSet;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.AbstractFileSystem;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsStatus;
+import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathExistsException;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteFileSystem;
+import org.apache.ignite.cache.CacheWriteSynchronizationMode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.FileSystemConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils;
+import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
+import org.apache.ignite.internal.util.GridConcurrentHashSet;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.spi.communication.CommunicationSpi;
+import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.jetbrains.annotations.Nullable;
+import org.jsr166.ThreadLocalRandom8;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.cache.CacheMode.REPLICATED;
+import static org.apache.ignite.events.EventType.EVT_JOB_MAPPED;
+import static org.apache.ignite.events.EventType.EVT_TASK_FAILED;
+import static org.apache.ignite.events.EventType.EVT_TASK_FINISHED;
+import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
+import static org.apache.ignite.igfs.IgfsMode.PROXY;
+
+/**
+ * Abstract self test for a Hadoop 2.x compliant file system.
+ */
+public abstract class HadoopIgfs20FileSystemAbstractSelfTest extends IgfsCommonAbstractTest {
+    /** Group size. */
+    public static final int GRP_SIZE = 128;
+
+    /** Thread count for multithreaded tests. */
+    private static final int THREAD_CNT = 8;
+
+    /** Secondary file system user. */
+    private static final String SECONDARY_FS_USER = "secondary-default";
+
+    /** IP finder. */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** Barrier for multithreaded tests. */
+    private static CyclicBarrier barrier;
+
+    /** File system. */
+    private static AbstractFileSystem fs;
+
+    /** Default IGFS mode. */
+    protected IgfsMode mode;
+
+    /** Primary file system URI. */
+    protected URI primaryFsUri;
+
+    /** Primary file system configuration. */
+    protected Configuration primaryFsCfg;
+
+    /**
+     * Constructor.
+     *
+     * @param mode Default IGFS mode.
+     */
+    protected HadoopIgfs20FileSystemAbstractSelfTest(IgfsMode mode) {
+        this.mode = mode;
+    }
+
+    /**
+     * Gets primary file system URI path.
+     *
+     * @return Primary file system URI path.
+     */
+    protected abstract String primaryFileSystemUriPath();
+
+    /**
+     * Gets primary file system config path.
+     *
+     * @return Primary file system config path.
+     */
+    protected abstract String primaryFileSystemConfigPath();
+
+    /**
+     * Gets primary IPC endpoint configuration.
+     *
+     * @param gridName Grid name.
+     * @return IPC primary endpoint configuration.
+     */
+    protected abstract IgfsIpcEndpointConfiguration primaryIpcEndpointConfiguration(String gridName);
+
+    /**
+     * Gets secondary file system URI path.
+     *
+     * @return Secondary file system URI path.
+     */
+    protected abstract String secondaryFileSystemUriPath();
+
+    /**
+     * Gets secondary file system config path.
+     *
+     * @return Secondary file system config path.
+     */
+    protected abstract String secondaryFileSystemConfigPath();
+
+    /**
+     * Gets secondary IPC endpoint configuration.
+     *
+     * @return Secondary IPC endpoint configuration.
+     */
+    protected abstract IgfsIpcEndpointConfiguration secondaryIpcEndpointConfiguration();
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        startNodes();
+    }
+
+    /**
+     * Starts the nodes for this test.
+     *
+     * @throws Exception If failed.
+     */
+    private void startNodes() throws Exception {
+        if (mode != PRIMARY) {
+            // Start secondary IGFS.
+            FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
+
+            igfsCfg.setDataCacheName("partitioned");
+            igfsCfg.setMetaCacheName("replicated");
+            igfsCfg.setName("igfs_secondary");
+            igfsCfg.setIpcEndpointConfiguration(secondaryIpcEndpointConfiguration());
+            igfsCfg.setManagementPort(-1);
+            igfsCfg.setBlockSize(512 * 1024);
+            igfsCfg.setPrefetchBlocks(1);
+
+            CacheConfiguration cacheCfg = defaultCacheConfiguration();
+
+            cacheCfg.setName("partitioned");
+            cacheCfg.setCacheMode(PARTITIONED);
+            cacheCfg.setNearConfiguration(null);
+            cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+            cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(GRP_SIZE));
+            cacheCfg.setBackups(0);
+            cacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+            CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
+
+            metaCacheCfg.setName("replicated");
+            metaCacheCfg.setCacheMode(REPLICATED);
+            metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+            metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+            IgniteConfiguration cfg = new IgniteConfiguration();
+
+            cfg.setGridName("grid_secondary");
+
+            TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+
+            discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
+
+            cfg.setDiscoverySpi(discoSpi);
+            cfg.setCacheConfiguration(metaCacheCfg, cacheCfg);
+            cfg.setFileSystemConfiguration(igfsCfg);
+            cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED);
+            cfg.setLocalHost(U.getLocalHost().getHostAddress());
+            cfg.setCommunicationSpi(communicationSpi());
+
+            G.start(cfg);
+        }
+
+        startGrids(4);
+
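+        // Wait for partition map exchange so caches are fully rebalanced before the tests start.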
+        awaitPartitionMapExchange();
+    }
+
+    /** {@inheritDoc} */
+    @Override public String getTestGridName() {
+        return "grid";
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+
+        discoSpi.setIpFinder(IP_FINDER);
+
+        cfg.setDiscoverySpi(discoSpi);
+        cfg.setCacheConfiguration(cacheConfiguration(gridName));
+        cfg.setFileSystemConfiguration(igfsConfiguration(gridName));
+        cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED);
+        cfg.setLocalHost("127.0.0.1");
+        cfg.setCommunicationSpi(communicationSpi());
+
+        return cfg;
+    }
+
+    /**
+     * Gets cache configuration.
+     *
+     * @param gridName Grid name.
+     * @return Cache configuration.
+     */
+    protected CacheConfiguration[] cacheConfiguration(String gridName) {
+        CacheConfiguration cacheCfg = defaultCacheConfiguration();
+
+        cacheCfg.setName("partitioned");
+        cacheCfg.setCacheMode(PARTITIONED);
+        cacheCfg.setNearConfiguration(null);
+        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(GRP_SIZE));
+        cacheCfg.setBackups(0);
+        cacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
+
+        metaCacheCfg.setName("replicated");
+        metaCacheCfg.setCacheMode(REPLICATED);
+        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        return new CacheConfiguration[] {metaCacheCfg, cacheCfg};
+    }
+
+    /**
+     * Gets IGFS configuration.
+     *
+     * @param gridName Grid name.
+     * @return IGFS configuration.
+     * @throws IgniteCheckedException If failed.
+     */
+    protected FileSystemConfiguration igfsConfiguration(String gridName) throws IgniteCheckedException {
+        FileSystemConfiguration cfg = new FileSystemConfiguration();
+
+        cfg.setDataCacheName("partitioned");
+        cfg.setMetaCacheName("replicated");
+        cfg.setName("igfs");
+        cfg.setPrefetchBlocks(1);
+        cfg.setMaxSpaceSize(64 * 1024 * 1024);
+        cfg.setDefaultMode(mode);
+
+        if (mode != PRIMARY)
+            cfg.setSecondaryFileSystem(new IgniteHadoopIgfsSecondaryFileSystem(secondaryFileSystemUriPath(),
+                secondaryFileSystemConfigPath(), SECONDARY_FS_USER));
+
+        cfg.setIpcEndpointConfiguration(primaryIpcEndpointConfiguration(gridName));
+        cfg.setManagementPort(-1);
+
+        cfg.setBlockSize(512 * 1024); // Together with the group blocks mapper this yields 64M groups per node.
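+        // (512 KB block size * GRP_SIZE of 128 blocks = 64 MB of consecutive data per affinity group.)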
+
+        return cfg;
+    }
+
+    /** @return Communication SPI. */
+    private CommunicationSpi communicationSpi() {
+        TcpCommunicationSpi commSpi = new TcpCommunicationSpi();
+
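+        // Port -1 disables the shared memory server, forcing inter-node communication over TCP.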
+        commSpi.setSharedMemoryPort(-1);
+
+        return commSpi;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        G.stopAll(true);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        primaryFsUri = new URI(primaryFileSystemUriPath());
+
+        primaryFsCfg = new Configuration();
+
+        primaryFsCfg.addResource(U.resolveIgniteUrl(primaryFileSystemConfigPath()));
+
+        UserGroupInformation ugi = UserGroupInformation.getBestUGI(null, getClientFsUser());
+
+        // Create Fs on behalf of the client user:
+        ugi.doAs(new PrivilegedExceptionAction<Object>() {
+            @Override public Object run() throws Exception {
+                fs = AbstractFileSystem.get(primaryFsUri, primaryFsCfg);
+
+                return null;
+            }
+        });
+
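+        // Re-create the barrier so every multithreaded test starts from a clean synchronization point.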
+        barrier = new CyclicBarrier(THREAD_CNT);
+    }
+
+    /**
+     * Gets the user the Fs client operates on behalf of.
+     *
+     * @return The user the Fs client operates on behalf of.
+     */
+    protected String getClientFsUser() {
+        return "foo";
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        try {
+            HadoopIgfsUtils.clear(fs);
+        }
+        catch (Exception ignore) {
+            // No-op.
+        }
+
+        U.closeQuiet((Closeable)fs);
+    }
+
+    /** @throws Exception If failed. */
+    public void testStatus() throws Exception {
+        Path file1 = new Path("/file1");
+
+        try (FSDataOutputStream file = fs.create(file1, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()))) {
+            file.write(new byte[1024 * 1024]);
+        }
+
+        FsStatus status = fs.getFsStatus();
+
+        assertEquals(getClientFsUser(), fs.getFileStatus(file1).getOwner());
+
+        assertEquals(4, grid(0).cluster().nodes().size());
+
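+        // Sum local space metrics over all nodes; the totals should match the reported file system status.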
+        long used = 0, max = 0;
+
+        for (int i = 0; i < 4; i++) {
+            IgniteFileSystem igfs = grid(i).fileSystem("igfs");
+
+            IgfsMetrics metrics = igfs.metrics();
+
+            used += metrics.localSpaceSize();
+            max += metrics.maxSpaceSize();
+        }
+
+        assertEquals(used, status.getUsed());
+        assertEquals(max, status.getCapacity());
+    }
+
+    /** @throws Exception If failed. */
+    public void testTimes() throws Exception {
+        Path file = new Path("/file1");
+
+        long now = System.currentTimeMillis();
+
+        try (FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()))) {
+            os.write(new byte[1024 * 1024]);
+        }
+
+        FileStatus status = fs.getFileStatus(file);
+
+        assertTrue(status.getAccessTime() >= now);
+        assertTrue(status.getModificationTime() >= now);
+
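+        // Rewind the times into the past: access time by 10 minutes, modification time by 5.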
+        long accessTime = now - 10 * 60 * 1000;
+        long modificationTime = now - 5 * 60 * 1000;
+
+        fs.setTimes(file, modificationTime, accessTime);
+
+        status = fs.getFileStatus(file);
+        assertEquals(accessTime, status.getAccessTime());
+        assertEquals(modificationTime, status.getModificationTime());
+
+        // Check listing is updated as well.
+        FileStatus[] files = fs.listStatus(new Path("/"));
+
+        assertEquals(1, files.length);
+
+        assertEquals(file.getName(), files[0].getPath().getName());
+        assertEquals(accessTime, files[0].getAccessTime());
+        assertEquals(modificationTime, files[0].getModificationTime());
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                fs.setTimes(new Path("/unknownFile"), 0, 0);
+
+                return null;
+            }
+        }, FileNotFoundException.class, null);
+    }
+
+    /** @throws Exception If failed. */
+    public void testCreateCheckParameters() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.create(null, EnumSet.noneOf(CreateFlag.class),
+                    Options.CreateOpts.perms(FsPermission.getDefault()));
+            }
+        }, NullPointerException.class, null);
+    }
+
+    /** @throws Exception If failed. */
+    public void testCreateBase() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
+        Path file = new Path(dir, "someFile");
+
+        assertPathDoesNotExist(fs, file);
+
+        FsPermission fsPerm = new FsPermission((short)644);
+
+        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(fsPerm));
+
+        // Try to write something in file.
+        os.write("abc".getBytes());
+
+        os.close();
+
+        // Check file status.
+        FileStatus fileStatus = fs.getFileStatus(file);
+
+        assertFalse(fileStatus.isDirectory());
+        assertEquals(file, fileStatus.getPath());
+        assertEquals(fsPerm, fileStatus.getPermission());
+    }
+
+    /** @throws Exception If failed. */
+    public void testCreateCheckOverwrite() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
+        final Path file = new Path(dir, "someFile");
+
+        FSDataOutputStream out = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        out.close();
+
+        // Check intermediate directory permissions.
+        assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir).getPermission());
+        assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir.getParent()).getPermission());
+        assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir.getParent().getParent()).getPermission());
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.create(file, EnumSet.noneOf(CreateFlag.class),
+                    Options.CreateOpts.perms(FsPermission.getDefault()));
+            }
+        }, PathExistsException.class, null);
+
+        // Overwrite should be successful.
+        FSDataOutputStream out1 = fs.create(file, EnumSet.of(CreateFlag.OVERWRITE),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        out1.close();
+    }
+
+    /** @throws Exception If failed. */
+    public void testDeleteIfNoSuchPath() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
+
+        assertPathDoesNotExist(fs, dir);
+
+        assertFalse(fs.delete(dir, true));
+    }
+
+    /** @throws Exception If failed. */
+    public void testDeleteSuccessfulIfPathIsOpenedToRead() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path file = new Path(fsHome, "myFile");
+
+        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        final int cnt = 5 * FileSystemConfiguration.DFLT_BLOCK_SIZE; // Write 5 * block size ints (20 blocks of data).
+
+        for (int i = 0; i < cnt; i++)
+            os.writeInt(i);
+
+        os.close();
+
+        final FSDataInputStream is = fs.open(file, -1);
+
+        for (int i = 0; i < cnt / 2; i++)
+            assertEquals(i, is.readInt());
+
+        assert fs.delete(file, false);
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                fs.getFileStatus(file);
+
+                return null;
+            }
+        }, FileNotFoundException.class, null);
+
+        is.close();
+    }
+
+    /** @throws Exception If failed. */
+    public void testDeleteIfFilePathExists() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path file = new Path(fsHome, "myFile");
+
+        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        assertTrue(fs.delete(file, false));
+
+        assertPathDoesNotExist(fs, file);
+    }
+
+    /** @throws Exception If failed. */
+    public void testDeleteIfDirectoryPathExists() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
+
+        FSDataOutputStream os = fs.create(dir, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        assertTrue(fs.delete(dir, false));
+
+        assertPathDoesNotExist(fs, dir);
+    }
+
+    /** @throws Exception If failed. */
+    public void testDeleteFailsIfNonRecursive() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
+
+        FSDataOutputStream os = fs.create(someDir3, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        final Path someDir2 = new Path(fsHome, "/someDir1/someDir2");
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                fs.delete(someDir2, false);
+
+                return null;
+            }
+        }, PathIsNotEmptyDirectoryException.class, null);
+
+        assertPathExists(fs, someDir2);
+        assertPathExists(fs, someDir3);
+    }
+
+    /** @throws Exception If failed. */
+    public void testDeleteRecursively() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
+
+        FSDataOutputStream os = fs.create(someDir3, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        Path someDir2 = new Path(fsHome, "/someDir1/someDir2");
+
+        assertTrue(fs.delete(someDir2, true));
+
+        assertPathDoesNotExist(fs, someDir2);
+        assertPathDoesNotExist(fs, someDir3);
+    }
+
+    /** @throws Exception If failed. */
+    public void testDeleteRecursivelyFromRoot() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
+
+        FSDataOutputStream os = fs.create(someDir3, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        Path root = new Path(fsHome, "/");
+
+        assertFalse(fs.delete(root, true));
+
+        assertTrue(fs.delete(new Path(fsHome, "/someDir1"), true));
+
+        assertPathDoesNotExist(fs, someDir3);
+        assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1/someDir2"));
+        assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1"));
+        assertPathExists(fs, root);
+    }
+
+    /** @throws Exception If failed. */
+    public void testSetPermissionCheckDefaultPermission() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path file = new Path(fsHome, "/tmp/my");
+
+        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        fs.setPermission(file, null);
+
+        assertEquals(FsPermission.getDefault(), fs.getFileStatus(file).getPermission());
+        assertEquals(FsPermission.getDefault(), fs.getFileStatus(file.getParent()).getPermission());
+    }
+
+    /** @throws Exception If failed. */
+    public void testSetPermissionCheckNonRecursiveness() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path file = new Path(fsHome, "/tmp/my");
+
+        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        Path tmpDir = new Path(fsHome, "/tmp");
+
+        FsPermission perm = new FsPermission((short)123);
+
+        fs.setPermission(tmpDir, perm);
+
+        assertEquals(perm, fs.getFileStatus(tmpDir).getPermission());
+        assertEquals(FsPermission.getDefault(), fs.getFileStatus(file).getPermission());
+    }
+
+    /** @throws Exception If failed. */
+    @SuppressWarnings("OctalInteger")
+    public void testSetPermission() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path file = new Path(fsHome, "/tmp/my");
+
+        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
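+        // Walk the whole permission range (up to 0777) in steps of 7 to cover varied bit patterns.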
+        for (short i = 0; i <= 0777; i += 7) {
+            FsPermission perm = new FsPermission(i);
+
+            fs.setPermission(file, perm);
+
+            assertEquals(perm, fs.getFileStatus(file).getPermission());
+        }
+    }
+
+    /** @throws Exception If failed. */
+    public void testSetPermissionIfOutputStreamIsNotClosed() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path file = new Path(fsHome, "myFile");
+
+        FsPermission perm = new FsPermission((short)123);
+
+        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        fs.setPermission(file, perm);
+
+        os.close();
+
+        assertEquals(perm, fs.getFileStatus(file).getPermission());
+    }
+
+    /** @throws Exception If failed. */
+    public void testSetOwnerCheckParametersPathIsNull() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path file = new Path(fsHome, "/tmp/my");
+
+        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                fs.setOwner(null, "aUser", "aGroup");
+
+                return null;
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: p");
+    }
+
+    /** @throws Exception If failed. */
+    public void testSetOwnerCheckParametersUserIsNull() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path file = new Path(fsHome, "/tmp/my");
+
+        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                fs.setOwner(file, null, "aGroup");
+
+                return null;
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: username");
+    }
+
+    /** @throws Exception If failed. */
+    public void testSetOwnerCheckParametersGroupIsNull() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path file = new Path(fsHome, "/tmp/my");
+
+        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                fs.setOwner(file, "aUser", null);
+
+                return null;
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: grpName");
+    }
+
+    /** @throws Exception If failed. */
+    public void testSetOwner() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path file = new Path(fsHome, "/tmp/my");
+
+        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        assertEquals(getClientFsUser(), fs.getFileStatus(file).getOwner());
+
+        fs.setOwner(file, "aUser", "aGroup");
+
+        assertEquals("aUser", fs.getFileStatus(file).getOwner());
+        assertEquals("aGroup", fs.getFileStatus(file).getGroup());
+    }
+
+    /** @throws Exception If failed. */
+    public void testSetOwnerIfOutputStreamIsNotClosed() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path file = new Path(fsHome, "myFile");
+
+        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        fs.setOwner(file, "aUser", "aGroup");
+
+        os.close();
+
+        assertEquals("aUser", fs.getFileStatus(file).getOwner());
+        assertEquals("aGroup", fs.getFileStatus(file).getGroup());
+    }
+
+    /** @throws Exception If failed. */
+    public void testSetOwnerCheckNonRecursiveness() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path file = new Path(fsHome, "/tmp/my");
+
+        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        Path tmpDir = new Path(fsHome, "/tmp");
+
+        fs.setOwner(file, "fUser", "fGroup");
+        fs.setOwner(tmpDir, "dUser", "dGroup");
+
+        assertEquals("dUser", fs.getFileStatus(tmpDir).getOwner());
+        assertEquals("dGroup", fs.getFileStatus(tmpDir).getGroup());
+
+        assertEquals("fUser", fs.getFileStatus(file).getOwner());
+        assertEquals("fGroup", fs.getFileStatus(file).getGroup());
+    }
+
+    /** @throws Exception If failed. */
+    public void testOpenCheckParametersPathIsNull() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.open(null, 1024);
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
+    }
+
+    /** @throws Exception If failed. */
+    public void testOpenNoSuchPath() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path file = new Path(fsHome, "someFile");
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.open(file, 1024);
+            }
+        }, FileNotFoundException.class, null);
+    }
+
+    /** @throws Exception If failed. */
+    public void testOpenIfPathIsAlreadyOpened() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path file = new Path(fsHome, "someFile");
+
+        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        FSDataInputStream is1 = fs.open(file);
+        FSDataInputStream is2 = fs.open(file);
+
+        is1.close();
+        is2.close();
+    }
+
+    /** @throws Exception If failed. */
+    public void testOpen() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path file = new Path(fsHome, "someFile");
+
+        int cnt = 2 * 1024;
+
+        try (FSDataOutputStream out = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()))) {
+
+            for (long i = 0; i < cnt; i++)
+                out.writeLong(i);
+        }
+
+        assertEquals(getClientFsUser(), fs.getFileStatus(file).getOwner());
+
+        try (FSDataInputStream in = fs.open(file, 1024)) {
+
+            for (long i = 0; i < cnt; i++)
+                assertEquals(i, in.readLong());
+        }
+    }
+
+    /** @throws Exception If failed. */
+    public void testAppendIfPathPointsToDirectory() throws Exception {
+        final Path fsHome = new Path(primaryFsUri);
+        final Path dir = new Path(fsHome, "/tmp");
+        Path file = new Path(dir, "my");
+
+        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        GridTestUtils.assertThrowsInherited(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.create(new Path(fsHome, dir), EnumSet.of(CreateFlag.APPEND),
+                    Options.CreateOpts.perms(FsPermission.getDefault()));
+            }
+        }, IOException.class, null);
+    }
+
+    /** @throws Exception If failed. */
+    public void testAppendIfFileIsAlreadyBeingOpenedToWrite() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path file = new Path(fsHome, "someFile");
+
+        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        FSDataOutputStream appendOs = fs.create(file, EnumSet.of(CreateFlag.APPEND),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.create(file, EnumSet.of(CreateFlag.APPEND),
+                    Options.CreateOpts.perms(FsPermission.getDefault()));
+            }
+        }, IOException.class, null);
+
+        appendOs.close();
+    }
+
+    /** @throws Exception If failed. */
+    public void testAppend() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path file = new Path(fsHome, "someFile");
+
+        int cnt = 1024;
+
+        FSDataOutputStream out = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        for (int i = 0; i < cnt; i++)
+            out.writeLong(i);
+
+        out.close();
+
+        out = fs.create(file, EnumSet.of(CreateFlag.APPEND),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        for (int i = cnt; i < cnt * 2; i++)
+            out.writeLong(i);
+
+        out.close();
+
+        FSDataInputStream in = fs.open(file, 1024);
+
+        for (int i = 0; i < cnt * 2; i++)
+            assertEquals(i, in.readLong());
+
+        in.close();
+    }
+
+    /** @throws Exception If failed. */
+    public void testRenameCheckParametersSrcPathIsNull() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path file = new Path(fsHome, "someFile");
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                fs.rename(null, file);
+
+                return null;
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
+    }
+
+    /** @throws Exception If failed. */
+    public void testRenameCheckParametersDstPathIsNull() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path file = new Path(fsHome, "someFile");
+
+        fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault())).close();
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override
+            public Object call() throws Exception {
+                fs.rename(file, null);
+
+                return null;
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
+    }
+
+    /** @throws Exception If failed. */
+    public void testRenameIfSrcPathDoesNotExist() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path srcFile = new Path(fsHome, "srcFile");
+        final Path dstFile = new Path(fsHome, "dstFile");
+
+        assertPathDoesNotExist(fs, srcFile);
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                fs.rename(srcFile, dstFile);
+
+                return null;
+            }
+        }, FileNotFoundException.class, null);
+
+        assertPathDoesNotExist(fs, dstFile);
+    }
+
+    /** @throws Exception If failed. */
+    public void testRenameIfSrcPathIsAlreadyBeingOpenedToWrite() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path srcFile = new Path(fsHome, "srcFile");
+        Path dstFile = new Path(fsHome, "dstFile");
+
+        FSDataOutputStream os = fs.create(srcFile, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        os = fs.create(srcFile, EnumSet.of(CreateFlag.APPEND),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        fs.rename(srcFile, dstFile);
+
+        assertPathExists(fs, dstFile);
+
+        String testStr = "Test";
+
+        try {
+            os.writeBytes(testStr);
+        }
+        finally {
+            os.close();
+        }
+
+        try (FSDataInputStream is = fs.open(dstFile)) {
+            byte[] buf = new byte[testStr.getBytes().length];
+
+            is.readFully(buf);
+
+            assertEquals(testStr, new String(buf));
+        }
+    }
+
+    /** @throws Exception If failed. */
+    public void testRenameFileIfDstPathExists() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path srcFile = new Path(fsHome, "srcFile");
+        final Path dstFile = new Path(fsHome, "dstFile");
+
+        FSDataOutputStream os = fs.create(srcFile, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        os = fs.create(dstFile, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                fs.rename(srcFile, dstFile);
+
+                return null;
+            }
+        }, FileAlreadyExistsException.class, null);
+
+        assertPathExists(fs, srcFile);
+        assertPathExists(fs, dstFile);
+    }
+
+    /** @throws Exception If failed. */
+    public void testRenameFile() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path srcFile = new Path(fsHome, "/tmp/srcFile");
+        Path dstFile = new Path(fsHome, "/tmp/dstFile");
+
+        FSDataOutputStream os = fs.create(srcFile, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        fs.rename(srcFile, dstFile);
+
+        assertPathDoesNotExist(fs, srcFile);
+        assertPathExists(fs, dstFile);
+    }
+
+    /** @throws Exception If failed. */
+    public void testRenameIfSrcPathIsAlreadyBeingOpenedToRead() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path srcFile = new Path(fsHome, "srcFile");
+        Path dstFile = new Path(fsHome, "dstFile");
+
+        FSDataOutputStream os = fs.create(srcFile, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        int cnt = 1024;
+
+        for (int i = 0; i < cnt; i++)
+            os.writeInt(i);
+
+        os.close();
+
+        FSDataInputStream is = fs.open(srcFile);
+
+        for (int i = 0; i < cnt; i++) {
+            if (i == 100)
+                // Rename file during the read process.
+                fs.rename(srcFile, dstFile);
+
+            assertEquals(i, is.readInt());
+        }
+
+        assertPathDoesNotExist(fs, srcFile);
+        assertPathExists(fs, dstFile);
+
+        is.close();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testRenameDirectoryIfDstPathExists() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path srcDir = new Path(fsHome, "/tmp/");
+        Path dstDir = new Path(fsHome, "/tmpNew/");
+
+        FSDataOutputStream os = fs.create(new Path(srcDir, "file1"), EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        os = fs.create(new Path(dstDir, "file2"), EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        try {
+            fs.rename(srcDir, dstDir);
+
+            fail("FileAlreadyExistsException expected.");
+        }
+        catch (FileAlreadyExistsException ignore) {
+            // No-op.
+        }
+
+        // Check all the files stay unchanged:
+        assertPathExists(fs, dstDir);
+        assertPathExists(fs, new Path(dstDir, "file2"));
+
+        assertPathExists(fs, srcDir);
+        assertPathExists(fs, new Path(srcDir, "file1"));
+    }
+
+    /** @throws Exception If failed. */
+    public void testRenameDirectory() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path dir = new Path(fsHome, "/tmp/");
+        Path newDir = new Path(fsHome, "/tmpNew/");
+
+        FSDataOutputStream os = fs.create(new Path(dir, "myFile"), EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        os.close();
+
+        fs.rename(dir, newDir);
+
+        assertPathDoesNotExist(fs, dir);
+        assertPathExists(fs, newDir);
+    }
+
+    /** @throws Exception If failed. */
+    public void testListStatusIfPathIsNull() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.listStatus(null);
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
+    }
+
+    /** @throws Exception If failed. */
+    public void testListStatusIfPathDoesNotExist() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.listStatus(new Path("/someDir"));
+            }
+        }, FileNotFoundException.class, null);
+    }
+
+    /**
+     * Test directory listing.
+     *
+     * @throws Exception If failed.
+     */
+    public void testListStatus() throws Exception {
+        Path igfsHome = new Path(primaryFsUri);
+
+        // Test listing of an empty directory.
+        Path dir = new Path(igfsHome, "dir");
+
+        fs.mkdir(dir, FsPermission.getDefault(), true);
+
+        FileStatus[] list = fs.listStatus(dir);
+
+        assert list.length == 0;
+
+        // Test listing of a non-empty directory.
+        Path subDir = new Path(dir, "subDir");
+
+        fs.mkdir(subDir, FsPermission.getDefault(), true);
+
+        Path file = new Path(dir, "file");
+
+        FSDataOutputStream fos = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        fos.close();
+
+        list = fs.listStatus(dir);
+
+        assert list.length == 2;
+
+        String listRes1 = list[0].getPath().getName();
+        String listRes2 = list[1].getPath().getName();
+
+        assert "subDir".equals(listRes1) && "file".equals(listRes2) || "subDir".equals(listRes2) &&
+            "file".equals(listRes1);
+
+        // Test listing of a file.
+        list = fs.listStatus(file);
+
+        assert list.length == 1;
+
+        assert "file".equals(list[0].getPath().getName());
+    }
+
+    /** @throws Exception If failed. */
+    public void testMkdirsIfPathIsNull() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                fs.mkdir(null, FsPermission.getDefault(), true);
+
+                return null;
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
+    }
+
+    /** @throws Exception If failed. */
+    public void testMkdirsIfPermissionIsNull() throws Exception {
+        Path dir = new Path("/tmp");
+
+        fs.mkdir(dir, null, true);
+
+        assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir).getPermission());
+    }
+
+    /** @throws Exception If failed. */
+    @SuppressWarnings("OctalInteger")
+    public void testMkdirs() throws Exception {
+        Path fsHome = new Path(primaryFileSystemUriPath());
+        Path dir = new Path(fsHome, "/tmp/staging");
+        Path nestedDir = new Path(dir, "nested");
+
+        FsPermission dirPerm = FsPermission.createImmutable((short)0700);
+        FsPermission nestedDirPerm = FsPermission.createImmutable((short)111);
+
+        fs.mkdir(dir, dirPerm, true);
+        fs.mkdir(nestedDir, nestedDirPerm, true);
+
+        assertEquals(dirPerm, fs.getFileStatus(dir).getPermission());
+        assertEquals(nestedDirPerm, fs.getFileStatus(nestedDir).getPermission());
+
+        assertEquals(getClientFsUser(), fs.getFileStatus(dir).getOwner());
+        assertEquals(getClientFsUser(), fs.getFileStatus(nestedDir).getOwner());
+    }
+
+    /** @throws Exception If failed. */
+    public void testGetFileStatusIfPathIsNull() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.getFileStatus(null);
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
+    }
+
+    /** @throws Exception If failed. */
+    public void testGetFileStatusIfPathDoesNotExist() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.getFileStatus(new Path("someDir"));
+            }
+        }, FileNotFoundException.class, "File not found: someDir");
+    }
+
+    /** @throws Exception If failed. */
+    public void testGetFileBlockLocationsIfFileStatusIsNull() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                // Argument is checked by Hadoop.
+                return fs.getFileBlockLocations(null, 1, 2);
+            }
+        }, NullPointerException.class, null);
+    }
+
+    /** @throws Exception If failed. */
+    public void testGetFileBlockLocationsIfFileStatusReferenceNotExistingPath() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.getFileBlockLocations(new Path("/someFile"), 1, 2);
+            }
+        }, FileNotFoundException.class, null);
+    }
+
+    /** @throws Exception If failed. */
+    public void testGetFileBlockLocations() throws Exception {
+        Path igfsHome = new Path(primaryFsUri);
+
+        Path file = new Path(igfsHome, "someFile");
+
+        try (OutputStream out = new BufferedOutputStream(fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault())))) {
+            byte[] data = new byte[128 * 1024];
+
+            for (int i = 0; i < 100; i++)
+                out.write(data);
+
+            out.flush();
+        }
+
+        try (FSDataInputStream in = fs.open(file, 1024 * 1024)) {
+            byte[] data = new byte[128 * 1024];
+
+            int read;
+
+            do {
+                read = in.read(data);
+            }
+            while (read > 0);
+        }
+
+        FileStatus status = fs.getFileStatus(file);
+
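+        // Expected affinity group length: 128 blocks per group * 512K block size = 64M.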
+        int grpLen = 128 * 512 * 1024;
+
+        int grpCnt = (int)((status.getLen() + grpLen - 1) / grpLen);
+
+        BlockLocation[] locations = fs.getFileBlockLocations(file, 0, status.getLen());
+
+        assertEquals(grpCnt, locations.length);
+    }
+
+    /** @throws Exception If failed. */
+    public void testZeroReplicationFactor() throws Exception {
+        // This test doesn't make sense for any mode except PRIMARY.
+        if (mode == PRIMARY) {
+            Path igfsHome = new Path(primaryFsUri);
+
+            Path file = new Path(igfsHome, "someFile");
+
+            try (FSDataOutputStream out = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+                Options.CreateOpts.perms(FsPermission.getDefault()), Options.CreateOpts.repFac((short)1))) {
+                out.write(new byte[1024 * 1024]);
+            }
+
+            IgniteFileSystem igfs = grid(0).fileSystem("igfs");
+
+            IgfsPath filePath = new IgfsPath("/someFile");
+
+            IgfsFile fileInfo = igfs.info(filePath);
+
+            Collection<IgfsBlockLocation> locations = igfs.affinity(filePath, 0, fileInfo.length());
+
+            assertEquals(1, locations.size());
+
+            IgfsBlockLocation location = F.first(locations);
+
+            assertEquals(1, location.nodeIds().size());
+        }
+    }
+
+    /**
+     * Ensures that in multithreaded mode only one create() operation succeeds.
+     *
+     * @throws Exception If failed.
+     */
+    public void testMultithreadedCreate() throws Exception {
+        Path dir = new Path(new Path(primaryFsUri), "/dir");
+
+        fs.mkdir(dir, FsPermission.getDefault(), true);
+
+        final Path file = new Path(dir, "file");
+
+        fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault())).close();
+
+        final AtomicInteger cnt = new AtomicInteger();
+
+        final Collection<Integer> errs = new GridConcurrentHashSet<>(THREAD_CNT, 1.0f, THREAD_CNT);
+
+        multithreaded(new Runnable() {
+            @Override public void run() {
+                int idx = cnt.getAndIncrement();
+
+                byte[] data = new byte[256];
+
+                Arrays.fill(data, (byte)idx);
+
+                FSDataOutputStream os = null;
+
+                try {
+                    os = fs.create(file, EnumSet.of(CreateFlag.OVERWRITE),
+                        Options.CreateOpts.perms(FsPermission.getDefault()));
+
+                    os.write(data);
+                }
+                catch (IOException ignore) {
+                    errs.add(idx);
+                }
+                finally {
+                    U.awaitQuiet(barrier);
+
+                    U.closeQuiet(os);
+                }
+            }
+        }, THREAD_CNT);
+
+        // Only one thread could obtain write lock on the file.
+        assert errs.size() == THREAD_CNT - 1 : "Invalid errors count [expected=" + (THREAD_CNT - 1) + ", actual=" +
+            errs.size() + ']';
+
+        int idx = -1;
+
+        for (int i = 0; i < THREAD_CNT; i++) {
+            if (!errs.remove(i)) {
+                idx = i;
+
+                break;
+            }
+        }
+
+        byte[] expData = new byte[256];
+
+        Arrays.fill(expData, (byte)idx);
+
+        FSDataInputStream is = fs.open(file);
+
+        byte[] data = new byte[256];
+
+        is.read(data);
+
+        is.close();
+
+        assert Arrays.equals(expData, data);
+    }
+
+    /**
+     * Ensures that in multithreaded mode only one append() operation succeeds.
+     *
+     * @throws Exception If failed.
+     */
+    public void testMultithreadedAppend() throws Exception {
+        Path dir = new Path(new Path(primaryFsUri), "/dir");
+
+        fs.mkdir(dir, FsPermission.getDefault(), true);
+
+        final Path file = new Path(dir, "file");
+
+        fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault())).close();
+
+        final AtomicInteger cnt = new AtomicInteger();
+
+        final Collection<Integer> errs = new GridConcurrentHashSet<>(THREAD_CNT, 1.0f, THREAD_CNT);
+
+        multithreaded(new Runnable() {
+            @Override public void run() {
+                int idx = cnt.getAndIncrement();
+
+                byte[] data = new byte[256];
+
+                Arrays.fill(data, (byte)idx);
+
+                U.awaitQuiet(barrier);
+
+                FSDataOutputStream os = null;
+
+                try {
+                    os = fs.create(file, EnumSet.of(CreateFlag.APPEND),
+                        Options.CreateOpts.perms(FsPermission.getDefault()));
+
+                    os.write(data);
+                }
+                catch (IOException ignore) {
+                    errs.add(idx);
+                }
+                finally {
+                    U.awaitQuiet(barrier);
+
+                    U.closeQuiet(os);
+                }
+            }
+        }, THREAD_CNT);
+
+        // Only one thread could obtain write lock on the file.
+        assert errs.size() == THREAD_CNT - 1;
+
+        int idx = -1;
+
+        for (int i = 0; i < THREAD_CNT; i++) {
+            if (!errs.remove(i)) {
+                idx = i;
+
+                break;
+            }
+        }
+
+        byte[] expData = new byte[256];
+
+        Arrays.fill(expData, (byte)idx);
+
+        FSDataInputStream is = fs.open(file);
+
+        byte[] data = new byte[256];
+
+        is.read(data);
+
+        is.close();
+
+        assert Arrays.equals(expData, data);
+    }
+
+    /**
+     * Test concurrent reads within the file.
+     *
+     * @throws Exception If failed.
+     */
+    public void testMultithreadedOpen() throws Exception {
+        final byte[] dataChunk = new byte[256];
+
+        for (int i = 0; i < dataChunk.length; i++)
+            dataChunk[i] = (byte)i;
+
+        Path dir = new Path(new Path(primaryFsUri), "/dir");
+
+        fs.mkdir(dir, FsPermission.getDefault(), true);
+
+        final Path file = new Path(dir, "file");
+
+        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault()));
+
+        // Write 256 * 2048 bytes = 512KB of data.
+        for (int i = 0; i < 2048; i++)
+            os.write(dataChunk);
+
+        os.close();
+
+        final AtomicBoolean err = new AtomicBoolean();
+
+        multithreaded(new Runnable() {
+            @Override
+            public void run() {
+                FSDataInputStream is = null;
+
+                try {
+                    int pos = ThreadLocalRandom8.current().nextInt(2048);
+
+                    try {
+                        is = fs.open(file);
+                    }
+                    finally {
+                        U.awaitQuiet(barrier);
+                    }
+
+                    is.seek(256 * pos);
+
+                    byte[] buf = new byte[256];
+
+                    for (int i = pos; i < 2048; i++) {
+                        // First perform normal read.
+                        int read = is.read(buf);
+
+                        assert read == 256;
+
+                        assert Arrays.equals(dataChunk, buf);
+                    }
+
+                    int res = is.read(buf);
+
+                    assert res == -1;
+                }
+                catch (IOException ignore) {
+                    err.set(true);
+                }
+                finally {
+                    U.closeQuiet(is);
+                }
+            }
+        }, THREAD_CNT);
+
+        assert !err.get();
+    }
+
+    /**
+     * Test concurrent creation of multiple directories.
+     *
+     * @throws Exception If failed.
+     */
+    public void testMultithreadedMkdirs() throws Exception {
+        final Path dir = new Path(new Path("igfs:///"), "/dir");
+
+        fs.mkdir(dir, FsPermission.getDefault(), true);
+
+        final int depth = 3;
+        final int entryCnt = 5;
+
+        final AtomicBoolean err = new AtomicBoolean();
+
+        multithreaded(new Runnable() {
+            @Override public void run() {
+                Deque<IgniteBiTuple<Integer, Path>> queue = new ArrayDeque<>();
+
+                queue.add(F.t(0, dir));
+
+                U.awaitQuiet(barrier);
+
+                while (!queue.isEmpty()) {
+                    IgniteBiTuple<Integer, Path> t = queue.pollFirst();
+
+                    int curDepth = t.getKey();
+                    Path curPath = t.getValue();
+
+                    if (curDepth <= depth) {
+                        int newDepth = curDepth + 1;
+
+                        // Create directories.
+                        for (int i = 0; i < entryCnt; i++) {
+                            Path subDir = new Path(curPath, "dir-" + newDepth + "-" + i);
+
+                            try {
+                                fs.mkdir(subDir, FsPermission.getDefault(), true);
+                            }
+                            catch (IOException ignore) {
+                                err.set(true);
+                            }
+
+                            queue.addLast(F.t(newDepth, subDir));
+                        }
+                    }
+                }
+            }
+        }, THREAD_CNT);
+
+        // Ensure there were no errors.
+        assert !err.get();
+
+        // Ensure correct folders structure.
+        Deque<IgniteBiTuple<Integer, Path>> queue = new ArrayDeque<>();
+
+        queue.add(F.t(0, dir));
+
+        while (!queue.isEmpty()) {
+            IgniteBiTuple<Integer, Path> t = queue.pollFirst();
+
+            int curDepth = t.getKey();
+            Path curPath = t.getValue();
+
+            if (curDepth <= depth) {
+                int newDepth = curDepth + 1;
+
+                // Create directories.
+                for (int i = 0; i < entryCnt; i++) {
+                    Path subDir = new Path(curPath, "dir-" + newDepth + "-" + i);
+
+                    assertNotNull(fs.getFileStatus(subDir));
+
+                    queue.add(F.t(newDepth, subDir));
+                }
+            }
+        }
+    }
+
+    /**
+     * Test concurrent deletion of the same directory with a deeply nested structure.
+     *
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("TooBroadScope")
+    public void testMultithreadedDelete() throws Exception {
+        final Path dir = new Path(new Path(primaryFsUri), "/dir");
+
+        fs.mkdir(dir, FsPermission.getDefault(), true);
+
+        int depth = 3;
+        int entryCnt = 5;
+
+        Deque<IgniteBiTuple<Integer, Path>> queue = new ArrayDeque<>();
+
+        queue.add(F.t(0, dir));
+
+        while (!queue.isEmpty()) {
+            IgniteBiTuple<Integer, Path> t = queue.pollFirst();
+
+            int curDepth = t.getKey();
+            Path curPath = t.getValue();
+
+            if (curDepth < depth) {
+                int newDepth = curDepth + 1;
+
+                // Create directories.
+                for (int i = 0; i < entryCnt; i++) {
+                    Path subDir = new Path(curPath, "dir-" + newDepth + "-" + i);
+
+                    fs.mkdir(subDir, FsPermission.getDefault(), true);
+
+                    queue.addLast(F.t(newDepth, subDir));
+                }
+            }
+            else {
+                // Create files.
+                for (int i = 0; i < entryCnt; i++) {
+                    Path file = new Path(curPath, "file " + i);
+
+                    fs.create(file, EnumSet.noneOf(CreateFlag.class),
+                        Options.CreateOpts.perms(FsPermission.getDefault())).close();
+                }
+            }
+        }
+
+        final AtomicBoolean err = new AtomicBoolean();
+
+        multithreaded(new Runnable() {
+            @Override public void run() {
+                try {
+                    U.awaitQuiet(barrier);
+
+                    fs.delete(dir, true);
+                }
+                catch (FileNotFoundException ignore) {
+                    // No-op.
+                }
+                catch (IOException ignore) {
+                    err.set(true);
+                }
+            }
+        }, THREAD_CNT);
+
+        // Ensure there were no errors.
+        assert !err.get();
+
+        // Ensure the directory was actually deleted.
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                fs.getFileStatus(dir);
+
+                return null;
+            }
+        }, FileNotFoundException.class, null);
+    }
+
+    /** @throws Exception If failed. */
+    public void testConsistency() throws Exception {
+        // Default buffer values.
+        checkConsistency(-1, 1, -1, -1, 1, -1);
+        checkConsistency(-1, 10, -1, -1, 10, -1);
+        checkConsistency(-1, 100, -1, -1, 100, -1);
+        checkConsistency(-1, 1000, -1, -1, 1000, -1);
+        checkConsistency(-1, 10000, -1, -1, 10000, -1);
+        checkConsistency(-1, 100000, -1, -1, 100000, -1);
+
+        checkConsistency(65 * 1024 + 13, 100000, -1, -1, 100000, -1);
+
+        checkConsistency(-1, 100000, 2 * 4 * 1024 + 17, -1, 100000, -1);
+
+        checkConsistency(-1, 100000, -1, 65 * 1024 + 13, 100000, -1);
+
+        checkConsistency(-1, 100000, -1, -1, 100000, 2 * 4 * 1024 + 17);
+
+        checkConsistency(65 * 1024 + 13, 100000, 2 * 4 * 1024 + 13, 65 * 1024 + 149, 100000, 2 * 4 * 1024 + 157);
+    }
+
+    /**
+     * Verifies that client reconnects after connection to the server has been lost.
+     *
+     * @throws Exception If error occurs.
+     */
+    public void testClientReconnect() throws Exception {
+        final Path igfsHome = new Path(primaryFsUri);
+
+        final Path filePath = new Path(igfsHome, "someFile");
+
+        final FSDataOutputStream s = fs.create(filePath, EnumSet.noneOf(CreateFlag.class),
+            Options.CreateOpts.perms(FsPermission.getDefault())); // Open stream before stopping IGFS.
+
+        try {
+            G.stopAll(true); // Stop the server.
+
+            startNodes(); // Start server again.
+
+            // Check that client is again operational.
+            fs.mkdir(new Path("igfs:///dir1/dir2"), FsPermission.getDefault(), true);
+
+            // However, streams opened before the disconnect should not be valid.
+            GridTestUtils.assertThrows(log, new Callable<Object>() {
+                @Nullable @Override public Object call() throws Exception {
+                    s.write("test".getBytes());
+
+                    s.flush();
+
+                    return null;
+                }
+            }, IOException.class, null);
+
+            GridTestUtils.assertThrows(log, new Callable<Object>() {
+                @Override public Object call() throws Exception {
+                    fs.getFileStatus(filePath);
+
+                    return null;
+                }
+            }, FileNotFoundException.class, null);
+        }
+        finally {
+            U.closeQuiet(s);
+        }
+    }
+
+    /**
+     * Verifies that client reconnects after connection to the server has been lost (multithreaded mode).
+     *
+     * @throws Exception If error occurs.
+     */
+    public void testClientReconnectMultithreaded() throws Exception {
+        final ConcurrentLinkedQueue<FileSystem> q = new ConcurrentLinkedQueue<>();
+
+        Configuration cfg = new Configuration();
+
+        for (Map.Entry<String, String> entry : primaryFsCfg)
+            cfg.set(entry.getKey(), entry.getValue());
+
+        cfg.setBoolean("fs.igfs.impl.disable.cache", true);
+
+        final int nClients = 16;
+
+        // Initialize clients.
+        for (int i = 0; i < nClients; i++)
+            q.add(FileSystem.get(primaryFsUri, cfg));
+
+        G.stopAll(true); // Stop the server.
+
+        startNodes(); // Start server again.
+
+        GridTestUtils.runMultiThreaded(new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                FileSystem fs = q.poll();
+
+                try {
+                    // Check that client is again operational.
+                    assertTrue(fs.mkdirs(new Path("igfs:///" + Thread.currentThread().getName())));
+
+                    return true;
+                }
+                finally {
+                    U.closeQuiet(fs);
+                }
+            }
+        }, nClients, "test-client");
+    }
+
+    /**
+     * Checks consistency of create --> open --> append --> open operations with different buffer sizes.
+     *
+     * @param createBufSize Buffer size used for file creation.
+     * @param writeCntsInCreate Count of times to write in file creation.
+     * @param openAfterCreateBufSize Buffer size used for file opening after creation.
+     * @param appendBufSize Buffer size used for file appending.
+     * @param writeCntsInAppend Count of times to write in file appending.
+     * @param openAfterAppendBufSize Buffer size used for file opening after appending.
+     * @throws Exception If failed.
+     */
+    private void checkConsistency(int createBufSize, int writeCntsInCreate, int openAfterCreateBufSize,
+        int appendBufSize, int writeCntsInAppend, int openAfterAppendBufSize) throws Exception {
+        final Path igfsHome = new Path(primaryFsUri);
+
+        Path file = new Path(igfsHome, "/someDir/someInnerDir/someFile");
+
+        if (createBufSize == -1)
+            createBufSize = fs.getServerDefaults().getFileBufferSize();
+
+        if (appendBufSize == -1)
+            appendBufSize = fs.getServerDefaults().getFileBufferSize();
+
+        FSDataOutputStream os = fs.create(file, EnumSet.of(CreateFlag.OVERWRITE),
+            Options.CreateOpts.perms(FsPermission.getDefault()), Options.CreateOpts.bufferSize(createBufSize));
+
+        for (int i = 0; i < writeCntsInCreate; i++)
+            os.writeInt(i);
+
+        os.close();
+
+        FSDataInputStream is = fs.open(file, openAfterCreateBufSize);
+
+        for (int i = 0; i < writeCntsInCreate; i++)
+            assertEquals(i, is.readInt());
+
+        is.close();
+
+        os = fs.create(file, EnumSet.of(CreateFlag.APPEND),
+            Options.CreateOpts.perms(FsPermission.getDefault()), Options.CreateOpts.bufferSize(appendBufSize));
+
+        for (int i = writeCntsInCreate; i < writeCntsInCreate + writeCntsInAppend; i++)
+            os.writeInt(i);
+
+        os.close();
+
+        is = fs.open(file, openAfterAppendBufSize);
+
+        for (int i = 0; i < writeCntsInCreate + writeCntsInAppend; i++)
+            assertEquals(i, is.readInt());
+
+        is.close();
+    }
+
+    /**
+     * Test expected failures for 'close' operation.
+     *
+     * @param fs File system to test.
+     * @param msg Expected exception message.
+     */
+    public void assertCloseFails(final FileSystem fs, String msg) {
+        GridTestUtils.assertThrows(log, new Callable() {
+            @Override public Object call() throws Exception {
+                fs.close();
+
+                return null;
+            }
+        }, IOException.class, msg);
+    }
+
+    /**
+     * Test expected failures for 'get content summary' operation.
+     *
+     * @param fs File system to test.
+     * @param path Path to evaluate content summary for.
+     */
+    private void assertContentSummaryFails(final FileSystem fs, final Path path) {
+        GridTestUtils.assertThrows(log, new Callable<ContentSummary>() {
+            @Override public ContentSummary call() throws Exception {
+                return fs.getContentSummary(path);
+            }
+        }, FileNotFoundException.class, null);
+    }
+
+    /**
+     * Assert that a given path exists in a given FileSystem.
+     *
+     * @param fs FileSystem to check.
+     * @param p Path to check.
+     * @throws IOException if the path does not exist.
+     */
+    private void assertPathExists(AbstractFileSystem fs, Path p) throws IOException {
+        FileStatus fileStatus = fs.getFileStatus(p);
+
+        assertEquals(p, fileStatus.getPath());
+        assertNotSame(0, fileStatus.getModificationTime());
+    }
+
+    /**
+     * Check path does not exist in a given FileSystem.
+     *
+     * @param fs FileSystem to check.
+     * @param path Path to check.
+     */
+    private void assertPathDoesNotExist(final AbstractFileSystem fs, final Path path) {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.getFileStatus(path);
+            }
+        }, FileNotFoundException.class, null);
+    }
+
+    /** Helper class to encapsulate source and destination folders. */
+    @SuppressWarnings({"PublicInnerClass", "PublicField"})
+    public static final class Config {
+        /** Source file system. */
+        public final AbstractFileSystem srcFs;
+
+        /** Source path to work with. */
+        public final Path src;
+
+        /** Destination file system. */
+        public final AbstractFileSystem destFs;
+
+        /** Destination path to work with. */
+        public final Path dest;
+
+        /**
+         * Copying task configuration.
+         *
+         * @param srcFs Source file system.
+         * @param src Source path.
+         * @param destFs Destination file system.
+         * @param dest Destination path.
+         */
+        public Config(AbstractFileSystem srcFs, Path src, AbstractFileSystem destFs, Path dest) {
+            this.srcFs = srcFs;
+            this.src = src;
+            this.destFs = destFs;
+            this.dest = dest;
+        }
+    }
+
+    /**
+     * Convert path for exception message testing purposes.
+     *
+     * @param path Path.
+     * @return Converted path.
+     * @throws Exception If failed.
+     */
+    private Path convertPath(Path path) throws Exception {
+        if (mode != PROXY)
+            return path;
+        else {
+            URI secondaryUri = new URI(secondaryFileSystemUriPath());
+
+            URI pathUri = path.toUri();
+
+            return new Path(new URI(pathUri.getScheme() != null ? secondaryUri.getScheme() : null,
+                pathUri.getAuthority() != null ? secondaryUri.getAuthority() : null, pathUri.getPath(), null, null));
+        }
+    }
+}
\ No newline at end of file
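A note for readers: the abstract test above drives the Hadoop 2.x file system API
(AbstractFileSystem) directly, where create() takes an EnumSet of CreateFlag plus varargs
Options.CreateOpts, unlike the legacy FileSystem API. Below is a minimal self-contained
sketch of the create/open cycle the tests exercise; the authority "igfs://igfs:grid0@/",
the class name and the file path are illustrative assumptions, while the scheme-binding
property and implementation class follow Ignite's documented Hadoop 2.x integration:

    import java.net.URI;
    import java.util.EnumSet;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.AbstractFileSystem;
    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.Options;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class IgfsV2ApiSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();

            // Bind the igfs:// scheme to Ignite's Hadoop 2.x file system implementation.
            conf.set("fs.AbstractFileSystem.igfs.impl",
                "org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem");

            AbstractFileSystem fs = AbstractFileSystem.get(new URI("igfs://igfs:grid0@/"), conf);

            Path file = new Path("/sketch/someFile");

            // Same call shape as in the tests: flags as an EnumSet, options as varargs.
            FSDataOutputStream out = fs.create(file, EnumSet.of(CreateFlag.CREATE),
                Options.CreateOpts.perms(FsPermission.getDefault()));

            out.writeLong(42L);
            out.close();

            FSDataInputStream in = fs.open(file, 1024);

            assert in.readLong() == 42L;

            in.close();
        }
    }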

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemLoopbackPrimarySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemLoopbackPrimarySelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemLoopbackPrimarySelfTest.java
new file mode 100644
index 0000000..ff5cd5b
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemLoopbackPrimarySelfTest.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
+import static org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT;
+
+/**
+ * Tests Hadoop 2.x file system in primary mode.
+ */
+public class HadoopIgfs20FileSystemLoopbackPrimarySelfTest extends HadoopIgfs20FileSystemAbstractSelfTest {
+    /**
+     * Creates test in primary mode.
+     */
+    public HadoopIgfs20FileSystemLoopbackPrimarySelfTest() {
+        super(PRIMARY);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected String primaryFileSystemUriPath() {
+        return "igfs://igfs:" + getTestGridName(0) + "@/";
+    }
+
+    /** {@inheritDoc} */
+    @Override protected String primaryFileSystemConfigPath() {
+        return "/modules/core/src/test/config/hadoop/core-site-loopback.xml";
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgfsIpcEndpointConfiguration primaryIpcEndpointConfiguration(final String gridName) {
+        IgfsIpcEndpointConfiguration cfg = new IgfsIpcEndpointConfiguration();
+
+        cfg.setType(IgfsIpcEndpointType.TCP);
+        cfg.setPort(DFLT_IPC_PORT + getTestGridIndex(gridName));
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected String secondaryFileSystemUriPath() {
+        assert false;
+
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected String secondaryFileSystemConfigPath() {
+        assert false;
+
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgfsIpcEndpointConfiguration secondaryIpcEndpointConfiguration() {
+        assert false;
+
+        return null;
+    }
+}
\ No newline at end of file
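The endpoint this loopback subclass returns must have a matching server-side declaration
on the IGFS node. A sketch of that wiring, assuming the Ignite 1.6.x APIs used elsewhere
in this commit; the file system name is illustrative, and the data/meta cache
configuration that FileSystemConfiguration also needs is elided for brevity:

    import org.apache.ignite.Ignition;
    import org.apache.ignite.configuration.FileSystemConfiguration;
    import org.apache.ignite.configuration.IgniteConfiguration;
    import org.apache.ignite.igfs.IgfsIpcEndpointConfiguration;
    import org.apache.ignite.igfs.IgfsIpcEndpointType;

    public class IgfsTcpEndpointSketch {
        public static void main(String[] args) {
            // Mirrors primaryIpcEndpointConfiguration() above: TCP on the default
            // IPC port (10500); the test offsets the port by the grid index so
            // that each node in the topology listens on its own port.
            IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();

            endpointCfg.setType(IgfsIpcEndpointType.TCP);
            endpointCfg.setPort(10500);

            FileSystemConfiguration igfsCfg = new FileSystemConfiguration();

            igfsCfg.setName("igfs");
            igfsCfg.setIpcEndpointConfiguration(endpointCfg);

            IgniteConfiguration cfg = new IgniteConfiguration();

            cfg.setFileSystemConfiguration(igfsCfg);

            // Data/meta cache wiring is also required before this start()
            // succeeds; it is omitted from this sketch.
            Ignition.start(cfg);
        }
    }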

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemShmemPrimarySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemShmemPrimarySelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemShmemPrimarySelfTest.java
new file mode 100644
index 0000000..2bc9eb8
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemShmemPrimarySelfTest.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
+import static org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT;
+
+/**
+ * Tests Hadoop 2.x file system in primary mode.
+ */
+public class HadoopIgfs20FileSystemShmemPrimarySelfTest extends HadoopIgfs20FileSystemAbstractSelfTest {
+    /**
+     * Creates test in primary mode.
+     */
+    public HadoopIgfs20FileSystemShmemPrimarySelfTest() {
+        super(PRIMARY);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected String primaryFileSystemUriPath() {
+        return "igfs://igfs:" + getTestGridName(0) + "@/";
+    }
+
+    /** {@inheritDoc} */
+    @Override protected String primaryFileSystemConfigPath() {
+        return "/modules/core/src/test/config/hadoop/core-site.xml";
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgfsIpcEndpointConfiguration primaryIpcEndpointConfiguration(final String gridName) {
+        IgfsIpcEndpointConfiguration cfg = new IgfsIpcEndpointConfiguration();
+
+        cfg.setType(IgfsIpcEndpointType.SHMEM);
+        cfg.setPort(DFLT_IPC_PORT + getTestGridIndex(gridName));
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected String secondaryFileSystemUriPath() {
+        assert false;
+
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected String secondaryFileSystemConfigPath() {
+        assert false;
+
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgfsIpcEndpointConfiguration secondaryIpcEndpointConfiguration() {
+        assert false;
+
+        return null;
+    }
+}
\ No newline at end of file
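The shmem flavor differs from the loopback test only in the endpoint type (SHMEM instead
of TCP) and in the client-side core-site file it points at; shared-memory IPC also
requires the client and the IGFS node to run on the same host. What such a core-site
file minimally carries are the igfs:// scheme bindings, sketched here programmatically
(the property names and classes are Ignite's documented ones, the printout is
illustrative):

    import org.apache.hadoop.conf.Configuration;

    public class IgfsClientBindingSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();

            // Hadoop 1.x (FileSystem) and 2.x (AbstractFileSystem) bindings for igfs://.
            conf.set("fs.igfs.impl",
                "org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem");
            conf.set("fs.AbstractFileSystem.igfs.impl",
                "org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem");

            System.out.println(conf.get("fs.igfs.impl"));
        }
    }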


[05/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemAbstractSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemAbstractSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemAbstractSelfTest.java
deleted file mode 100644
index f793ec3..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemAbstractSelfTest.java
+++ /dev/null
@@ -1,2432 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathExistsException;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.ignite.GridTestIoUtils;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteFileSystem;
-import org.apache.ignite.cache.CacheWriteSynchronizationMode;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.configuration.FileSystemConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.hadoop.fs.CachingHadoopFileSystemFactory;
-import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
-import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsEx;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsIpcIo;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsOutProc;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils;
-import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
-import org.apache.ignite.internal.util.GridConcurrentHashSet;
-import org.apache.ignite.internal.util.lang.GridAbsPredicate;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.G;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lang.IgniteBiTuple;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
-import org.apache.ignite.testframework.GridTestUtils;
-import org.jetbrains.annotations.Nullable;
-import org.jsr166.ThreadLocalRandom8;
-
-import java.io.BufferedOutputStream;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.lang.reflect.Field;
-import java.net.URI;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayDeque;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.Deque;
-import java.util.LinkedList;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.CyclicBarrier;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
-
-import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
-import static org.apache.ignite.cache.CacheMode.PARTITIONED;
-import static org.apache.ignite.cache.CacheMode.REPLICATED;
-import static org.apache.ignite.events.EventType.EVT_JOB_MAPPED;
-import static org.apache.ignite.events.EventType.EVT_TASK_FAILED;
-import static org.apache.ignite.events.EventType.EVT_TASK_FINISHED;
-import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
-import static org.apache.ignite.igfs.IgfsMode.PROXY;
-
-/**
- * Tests Hadoop file system implementation.
- */
-@SuppressWarnings("all")
-public abstract class IgniteHadoopFileSystemAbstractSelfTest extends IgfsCommonAbstractTest {
-    /** Primary file system authority. */
-    private static final String PRIMARY_AUTHORITY = "igfs:grid0@";
-
-    /** Primary file system URI. */
-    private static final String PRIMARY_URI = "igfs://" + PRIMARY_AUTHORITY + "/";
-
-    /** Secondary file system authority. */
-    private static final String SECONDARY_AUTHORITY = "igfs_secondary:grid_secondary@127.0.0.1:11500";
-
-    /** Secondary file system URI. */
-    private static final String SECONDARY_URI = "igfs://" + SECONDARY_AUTHORITY + "/";
-
-    /** Secondary file system configuration path. */
-    private static final String SECONDARY_CFG_PATH = "/work/core-site-test.xml";
-
-    /** Secondary file system user. */
-    private static final String SECONDARY_FS_USER = "secondary-default";
-
-    /** Secondary endpoint configuration. */
-    protected static final IgfsIpcEndpointConfiguration SECONDARY_ENDPOINT_CFG;
-
-    /** Group size. */
-    public static final int GRP_SIZE = 128;
-
-    /** Path to the default hadoop configuration. */
-    public static final String HADOOP_FS_CFG = "examples/config/filesystem/core-site.xml";
-
-    /** Thread count for multithreaded tests. */
-    private static final int THREAD_CNT = 8;
-
-    /** IP finder. */
-    private final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
-
-    /** Barrier for multithreaded tests. */
-    private static CyclicBarrier barrier;
-
-    /** File system. */
-    private static FileSystem fs;
-
-    /** Default IGFS mode. */
-    protected final IgfsMode mode;
-
-    /** Skip embedded mode flag. */
-    private final boolean skipEmbed;
-
-    /** Skip local shmem flag. */
-    private final boolean skipLocShmem;
-
-    /** Endpoint. */
-    private final String endpoint;
-
-    /** Primary file system URI. */
-    protected URI primaryFsUri;
-
-    /** Primary file system configuration. */
-    protected Configuration primaryFsCfg;
-
-    static {
-        SECONDARY_ENDPOINT_CFG = new IgfsIpcEndpointConfiguration();
-
-        SECONDARY_ENDPOINT_CFG.setType(IgfsIpcEndpointType.TCP);
-        SECONDARY_ENDPOINT_CFG.setPort(11500);
-    }
-
-    /** File statuses comparator. */
-    private static final Comparator<FileStatus> STATUS_COMPARATOR = new Comparator<FileStatus>() {
-        @SuppressWarnings("deprecation")
-        @Override public int compare(FileStatus o1, FileStatus o2) {
-            if (o1 == null || o2 == null)
-                return o1 == o2 ? 0 : o1 == null ? -1 : 1;
-
-            return o1.isDir() == o2.isDir() ? o1.getPath().compareTo(o2.getPath()) : o1.isDir() ? -1 : 1;
-        }
-    };
-
-    /**
-     * Constructor.
-     *
-     * @param mode Default IGFS mode.
-     * @param skipEmbed Whether to skip embedded mode.
-     * @param skipLocShmem Whether to skip local shmem mode.
-     */
-    protected IgniteHadoopFileSystemAbstractSelfTest(IgfsMode mode, boolean skipEmbed, boolean skipLocShmem) {
-        this.mode = mode;
-        this.skipEmbed = skipEmbed;
-        this.skipLocShmem = skipLocShmem;
-
-        endpoint = skipLocShmem ? "127.0.0.1:10500" : "shmem:10500";
-    }
-
-    /**
-     * Gets the user the Fs client operates on behalf of.
-     * @return The user the Fs client operates on behalf of.
-     */
-    protected String getClientFsUser() {
-        return "foo";
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTestsStarted() throws Exception {
-        Configuration secondaryConf = configuration(SECONDARY_AUTHORITY, true, true);
-
-        secondaryConf.setInt("fs.igfs.block.size", 1024);
-
-        String path = U.getIgniteHome() + SECONDARY_CFG_PATH;
-
-        File file = new File(path);
-
-        try (FileOutputStream fos = new FileOutputStream(file)) {
-            secondaryConf.writeXml(fos);
-        }
-
-        startNodes();
-    }
-
-    /** {@inheritDoc} */
-    @Override protected long getTestTimeout() {
-        return 10 * 60 * 1000;
-    }
-
-    /**
-     * Starts the nodes for this test.
-     *
-     * @throws Exception If failed.
-     */
-    private void startNodes() throws Exception {
-        if (mode != PRIMARY) {
-            // Start secondary IGFS.
-            FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
-
-            igfsCfg.setDataCacheName("partitioned");
-            igfsCfg.setMetaCacheName("replicated");
-            igfsCfg.setName("igfs_secondary");
-            igfsCfg.setIpcEndpointConfiguration(SECONDARY_ENDPOINT_CFG);
-            igfsCfg.setBlockSize(512 * 1024);
-            igfsCfg.setPrefetchBlocks(1);
-
-            CacheConfiguration cacheCfg = defaultCacheConfiguration();
-
-            cacheCfg.setName("partitioned");
-            cacheCfg.setCacheMode(PARTITIONED);
-            cacheCfg.setNearConfiguration(null);
-            cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-            cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(GRP_SIZE));
-            cacheCfg.setBackups(0);
-            cacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-            CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
-
-            metaCacheCfg.setName("replicated");
-            metaCacheCfg.setCacheMode(REPLICATED);
-            metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-            metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-            IgniteConfiguration cfg = new IgniteConfiguration();
-
-            cfg.setGridName("grid_secondary");
-
-            TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
-
-            discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
-
-            cfg.setDiscoverySpi(discoSpi);
-            cfg.setCacheConfiguration(metaCacheCfg, cacheCfg);
-            cfg.setFileSystemConfiguration(igfsCfg);
-            cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED);
-
-            G.start(cfg);
-        }
-
-        startGrids(4);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTestsStopped() throws Exception {
-        G.stopAll(true);
-
-        String path = U.getIgniteHome() + SECONDARY_CFG_PATH;
-
-        new File(path).delete();
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTest() throws Exception {
-        primaryFsUri = new URI(PRIMARY_URI);
-
-        primaryFsCfg = configuration(PRIMARY_AUTHORITY, skipEmbed, skipLocShmem);
-
-        UserGroupInformation clientUgi = UserGroupInformation.getBestUGI(null, getClientFsUser());
-        assertNotNull(clientUgi);
-
-        // Create the Fs on behalf of the specific user:
-        clientUgi.doAs(new PrivilegedExceptionAction<Object>() {
-            @Override public Object run() throws Exception {
-                fs = FileSystem.get(primaryFsUri, primaryFsCfg);
-
-                return null;
-            }
-        });
-
-        barrier = new CyclicBarrier(THREAD_CNT);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTest() throws Exception {
-        try {
-            HadoopIgfsUtils.clear(fs);
-        }
-        catch (Exception ignore) {
-            // No-op.
-        }
-
-        U.closeQuiet(fs);
-    }
-
-    /**
-     * Get primary IPC endpoint configuration.
-     *
-     * @param gridName Grid name.
-     * @return IPC primary endpoint configuration.
-     */
-    protected abstract IgfsIpcEndpointConfiguration primaryIpcEndpointConfiguration(String gridName);
-
-    /** {@inheritDoc} */
-    @Override public String getTestGridName() {
-        return "grid";
-    }
-
-    /** {@inheritDoc} */
-    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
-        IgniteConfiguration cfg = super.getConfiguration(gridName);
-
-        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
-
-        discoSpi.setIpFinder(IP_FINDER);
-
-        cfg.setDiscoverySpi(discoSpi);
-        cfg.setCacheConfiguration(cacheConfiguration(gridName));
-        cfg.setFileSystemConfiguration(igfsConfiguration(gridName));
-        cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED);
-
-        return cfg;
-    }
-
-    /**
-     * Gets cache configuration.
-     *
-     * @param gridName Grid name.
-     * @return Cache configuration.
-     */
-    protected CacheConfiguration[] cacheConfiguration(String gridName) {
-        CacheConfiguration cacheCfg = defaultCacheConfiguration();
-
-        cacheCfg.setName("partitioned");
-        cacheCfg.setCacheMode(PARTITIONED);
-        cacheCfg.setNearConfiguration(null);
-        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(GRP_SIZE));
-        cacheCfg.setBackups(0);
-        cacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
-
-        metaCacheCfg.setName("replicated");
-        metaCacheCfg.setCacheMode(REPLICATED);
-        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        return new CacheConfiguration[] {metaCacheCfg, cacheCfg};
-    }
-
-    /**
-     * Gets IGFS configuration.
-     *
-     * @param gridName Grid name.
-     * @return IGFS configuration.
-     * @throws IgniteCheckedException If failed.
-     */
-    protected FileSystemConfiguration igfsConfiguration(String gridName) throws IgniteCheckedException {
-        FileSystemConfiguration cfg = new FileSystemConfiguration();
-
-        cfg.setDataCacheName("partitioned");
-        cfg.setMetaCacheName("replicated");
-        cfg.setName("igfs");
-        cfg.setPrefetchBlocks(1);
-        cfg.setDefaultMode(mode);
-
-        if (mode != PRIMARY) {
-            CachingHadoopFileSystemFactory fac = new CachingHadoopFileSystemFactory();
-
-            fac.setUri(SECONDARY_URI);
-            fac.setConfigPaths(SECONDARY_CFG_PATH);
-
-            IgniteHadoopIgfsSecondaryFileSystem sec = new IgniteHadoopIgfsSecondaryFileSystem();
-
-            sec.setFileSystemFactory(fac);
-            sec.setDefaultUserName(SECONDARY_FS_USER);
-
-            // NB: start() will be invoked upon IgfsImpl init.
-            cfg.setSecondaryFileSystem(sec);
-        }
-
-        cfg.setIpcEndpointConfiguration(primaryIpcEndpointConfiguration(gridName));
-
-        cfg.setManagementPort(-1);
-        cfg.setBlockSize(512 * 1024); // Together with the group blocks mapper this yields 64M per-node groups.
-
-        return cfg;
-    }
-
-    /** @throws Exception If failed. */
-    public void testGetUriIfFSIsNotInitialized() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return new IgniteHadoopFileSystem().getUri();
-            }
-        }, IllegalStateException.class,
-            "URI is null (was IgniteHadoopFileSystem properly initialized?)");
-    }
-
-    /** @throws Exception If failed. */
-    @SuppressWarnings("NullableProblems")
-    public void testInitializeCheckParametersNameIsNull() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                new IgniteHadoopFileSystem().initialize(null, new Configuration());
-
-                return null;
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: name");
-    }
-
-    /** @throws Exception If failed. */
-    @SuppressWarnings("NullableProblems")
-    public void testInitializeCheckParametersCfgIsNull() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                new IgniteHadoopFileSystem().initialize(new URI(""), null);
-
-                return null;
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: cfg");
-    }
-
-    /** @throws Exception If failed. */
-    public void testInitialize() throws Exception {
-        final IgniteHadoopFileSystem fs = new IgniteHadoopFileSystem();
-
-        fs.initialize(primaryFsUri, primaryFsCfg);
-
-        // Check that repeated initialization is not allowed.
-        try {
-            fs.initialize(primaryFsUri, primaryFsCfg);
-
-            fail();
-        }
-        catch (IOException e) {
-            assertTrue(e.getMessage().contains("File system is already initialized"));
-        }
-
-        assertEquals(primaryFsUri, fs.getUri());
-
-        assertEquals(0, fs.getUsed());
-
-        fs.close();
-    }
-
-    /**
-     * Test how IPC cache map works.
-     *
-     * @throws Exception If failed.
-     */
-    public void testIpcCache() throws Exception {
-        HadoopIgfsEx hadoop = GridTestUtils.getFieldValue(fs, "rmtClient", "delegateRef", "value", "hadoop");
-
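-        // The IPC connection cache is only used by the out-of-process client (HadoopIgfsOutProc).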
-        if (hadoop instanceof HadoopIgfsOutProc) {
-            FileSystem fsOther = null;
-
-            try {
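-                // Access the static IPC connection cache via reflection.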
-                Field field = HadoopIgfsIpcIo.class.getDeclaredField("ipcCache");
-
-                field.setAccessible(true);
-
-                Map<String, HadoopIgfsIpcIo> cache = (Map<String, HadoopIgfsIpcIo>)field.get(null);
-
-                Configuration cfg = configuration(PRIMARY_AUTHORITY, skipEmbed, skipLocShmem);
-
-                // We disable caching in order to obtain a new FileSystem instance.
-                cfg.setBoolean("fs.igfs.impl.disable.cache", true);
-
-                // Initial cache size.
-                int initSize = cache.size();
-
-                // Ensure that when IO is used by multiple file systems and one of them is closed, IO is not stopped.
-                fsOther = FileSystem.get(new URI(PRIMARY_URI), cfg);
-
-                assert fs != fsOther;
-
-                assertEquals(initSize, cache.size());
-
-                fsOther.close();
-
-                assertEquals(initSize, cache.size());
-
-                Field stopField = HadoopIgfsIpcIo.class.getDeclaredField("stopping");
-
-                stopField.setAccessible(true);
-
-                HadoopIgfsIpcIo io = null;
-
-                for (Map.Entry<String, HadoopIgfsIpcIo> ioEntry : cache.entrySet()) {
-                    if (endpoint.contains(ioEntry.getKey())) {
-                        io = ioEntry.getValue();
-
-                        break;
-                    }
-                }
-
-                assert io != null;
-
-                assert !(Boolean)stopField.get(io);
-
-                // Ensure that IO is stopped when nobody else needs it.
-                fs.close();
-
-                assert initSize >= cache.size();
-
-                assert (Boolean)stopField.get(io);
-            }
-            finally {
-                U.closeQuiet(fsOther);
-            }
-        }
-    }
-
-    /** @throws Exception If failed. */
-    public void testCloseIfNotInitialized() throws Exception {
-        final FileSystem fs = new IgniteHadoopFileSystem();
-
-        // Check that close does no harm.
-        fs.close();
-    }
-
-    /** @throws Exception If failed. */
-    public void testClose() throws Exception {
-        final Path path = new Path("dir");
-
-        fs.close();
-
-        // Check that double close does no harm.
-        fs.close();
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Nullable @Override public Object call() throws Exception {
-                fs.initialize(primaryFsUri, primaryFsCfg);
-
-                return null;
-            }
-        }, IOException.class, "File system is stopped.");
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Nullable @Override public Object call() throws Exception {
-                fs.setPermission(path, FsPermission.createImmutable((short)777));
-
-                return null;
-            }
-        }, IOException.class, "File system is stopped.");
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Nullable @Override public Object call() throws Exception {
-                fs.setOwner(path, "user", "group");
-
-                return null;
-            }
-        }, IOException.class, "File system is stopped.");
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.open(path, 256);
-            }
-        }, IOException.class, "File system is stopped.");
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.create(path);
-            }
-        }, IOException.class, "File system is stopped.");
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.append(path);
-            }
-        }, IOException.class, "File system is stopped.");
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.rename(path, new Path("newDir"));
-            }
-        }, IOException.class, "File system is stopped.");
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.delete(path, true);
-            }
-        }, IOException.class, "File system is stopped.");
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.listStatus(path);
-            }
-        }, IOException.class, "File system is stopped.");
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.mkdirs(path);
-            }
-        }, IOException.class, "File system is stopped.");
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.getFileStatus(path);
-            }
-        }, IOException.class, "File system is stopped.");
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.getFileBlockLocations(new FileStatus(1L, false, 1, 1L, 1L, new Path("path")), 0L, 256L);
-            }
-        }, IOException.class, "File system is stopped.");
-    }
-
-    /** @throws Exception If failed. */
-    public void testCreateCheckParameters() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.create(null);
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
-    }
-
-    /** @throws Exception If failed. */
-    @SuppressWarnings("deprecation")
-    public void testCreateBase() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
-        Path file = new Path(dir, "someFile");
-
-        assertPathDoesNotExist(fs, file);
-
-        FsPermission fsPerm = new FsPermission((short)644);
-
-        FSDataOutputStream os = fs.create(file, fsPerm, false, 1, (short)1, 1L, null);
-
-        // Try to write something into the file.
-        os.write("abc".getBytes());
-
-        os.close();
-
-        // Check file status.
-        FileStatus fileStatus = fs.getFileStatus(file);
-
-        assertFalse(fileStatus.isDir());
-        assertEquals(file, fileStatus.getPath());
-        assertEquals(fsPerm, fileStatus.getPermission());
-    }
-
-    /** @throws Exception If failed. */
-    @SuppressWarnings("deprecation")
-    public void testCreateCheckOverwrite() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
-        final Path file = new Path(dir, "someFile");
-
-        FSDataOutputStream out = fs.create(file, FsPermission.getDefault(), false, 64 * 1024,
-            fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
-
-        out.close();
-
-        // Check intermediate directory permissions.
-        assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir).getPermission());
-        assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir.getParent()).getPermission());
-        assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir.getParent().getParent()).getPermission());
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.create(file, FsPermission.getDefault(), false, 1024, (short)1, 2048, null);
-            }
-        }, PathExistsException.class, null);
-
-        // Overwrite should be successful.
-        FSDataOutputStream out1 = fs.create(file, true);
-
-        out1.close();
-    }
-
-    /** @throws Exception If failed. */
-    public void testDeleteIfNoSuchPath() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
-
-        assertPathDoesNotExist(fs, dir);
-
-        assertFalse(fs.delete(dir, true));
-    }
-
-    /** @throws Exception If failed. */
-    public void testDeleteSuccessfulIfPathIsOpenedToRead() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path file = new Path(fsHome, "myFile");
-
-        FSDataOutputStream os = fs.create(file, false, 128);
-
-        final int cnt = 5 * FileSystemConfiguration.DFLT_BLOCK_SIZE; // Write several blocks of data (cnt ints, 4 bytes each).
-
-        for (int i = 0; i < cnt; i++)
-            os.writeInt(i);
-
-        os.close();
-
-        final FSDataInputStream is = fs.open(file, -1);
-
-        for (int i = 0; i < cnt / 2; i++)
-            assertEquals(i, is.readInt());
-
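-        // Delete must succeed even though the file is still open for reading.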
-        assert fs.delete(file, false);
-
-        assert !fs.exists(file);
-
-        is.close();
-    }
-
-    /** @throws Exception If failed. */
-    public void testDeleteIfFilePathExists() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path file = new Path(fsHome, "myFile");
-
-        FSDataOutputStream os = fs.create(file);
-
-        os.close();
-
-        assertTrue(fs.delete(file, false));
-
-        assertPathDoesNotExist(fs, file);
-    }
-
-    /** @throws Exception If failed. */
-    public void testDeleteIfDirectoryPathExists() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
-
-        FSDataOutputStream os = fs.create(dir);
-
-        os.close();
-
-        assertTrue(fs.delete(dir, false));
-
-        assertPathDoesNotExist(fs, dir);
-    }
-
-    /** @throws Exception If failed. */
-    public void testDeleteFailsIfNonRecursive() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
-
-        fs.create(someDir3).close();
-
-        Path someDir2 = new Path(fsHome, "/someDir1/someDir2");
-
-        assertFalse(fs.delete(someDir2, false));
-
-        assertPathExists(fs, someDir2);
-        assertPathExists(fs, someDir3);
-    }
-
-    /** @throws Exception If failed. */
-    public void testDeleteRecursively() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
-
-        FSDataOutputStream os = fs.create(someDir3);
-
-        os.close();
-
-        Path someDir2 = new Path(fsHome, "/someDir1/someDir2");
-
-        assertTrue(fs.delete(someDir2, true));
-
-        assertPathDoesNotExist(fs, someDir2);
-        assertPathDoesNotExist(fs, someDir3);
-    }
-
-    /** @throws Exception If failed. */
-    public void testDeleteRecursivelyFromRoot() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
-
-        FSDataOutputStream os = fs.create(someDir3);
-
-        os.close();
-
-        Path root = new Path(fsHome, "/");
-
-        assertFalse(fs.delete(root, true));
-        assertTrue(fs.delete(new Path("/someDir1"), true));
-
-        assertPathDoesNotExist(fs, someDir3);
-        assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1/someDir2"));
-        assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1"));
-        assertPathExists(fs, root);
-    }
-
-    /** @throws Exception If failed. */
-    @SuppressWarnings("deprecation")
-    public void testSetPermissionCheckDefaultPermission() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path file = new Path(fsHome, "/tmp/my");
-
-        FSDataOutputStream os = fs.create(file, FsPermission.getDefault(), false, 64 * 1024,
-            fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
-
-        os.close();
-
-        fs.setPermission(file, null);
-
-        assertEquals(FsPermission.getDefault(), fs.getFileStatus(file).getPermission());
-        assertEquals(FsPermission.getDefault(), fs.getFileStatus(file.getParent()).getPermission());
-    }
-
-    /** @throws Exception If failed. */
-    @SuppressWarnings("deprecation")
-    public void testSetPermissionCheckNonRecursiveness() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path file = new Path(fsHome, "/tmp/my");
-
-        FSDataOutputStream os = fs.create(file, FsPermission.getDefault(), false, 64 * 1024,
-            fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
-
-        os.close();
-
-        Path tmpDir = new Path(fsHome, "/tmp");
-
-        FsPermission perm = new FsPermission((short)123);
-
-        fs.setPermission(tmpDir, perm);
-
-        assertEquals(perm, fs.getFileStatus(tmpDir).getPermission());
-        assertEquals(FsPermission.getDefault(), fs.getFileStatus(file).getPermission());
-    }
-
-    /** @throws Exception If failed. */
-    @SuppressWarnings("OctalInteger")
-    public void testSetPermission() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path file = new Path(fsHome, "/tmp/my");
-
-        FSDataOutputStream os = fs.create(file);
-
-        os.close();
-
-        for (short i = 0; i <= 0777; i += 7) {
-            FsPermission perm = new FsPermission(i);
-
-            fs.setPermission(file, perm);
-
-            assertEquals(perm, fs.getFileStatus(file).getPermission());
-        }
-    }
-
-    /** @throws Exception If failed. */
-    public void testSetPermissionIfOutputStreamIsNotClosed() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path file = new Path(fsHome, "myFile");
-
-        FsPermission perm = new FsPermission((short)123);
-
-        FSDataOutputStream os = fs.create(file);
-
-        fs.setPermission(file, perm);
-
-        os.close();
-
-        assertEquals(perm, fs.getFileStatus(file).getPermission());
-    }
-
-    /** @throws Exception If failed. */
-    public void testSetOwnerCheckParametersPathIsNull() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path file = new Path(fsHome, "/tmp/my");
-
-        FSDataOutputStream os = fs.create(file);
-
-        os.close();
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                fs.setOwner(null, "aUser", "aGroup");
-
-                return null;
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: p");
-    }
-
-    /** @throws Exception If failed. */
-    public void testSetOwnerCheckParametersUserIsNull() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path file = new Path(fsHome, "/tmp/my");
-
-        FSDataOutputStream os = fs.create(file);
-
-        os.close();
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                fs.setOwner(file, null, "aGroup");
-
-                return null;
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: username");
-    }
-
-    /** @throws Exception If failed. */
-    public void testSetOwnerCheckParametersGroupIsNull() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path file = new Path(fsHome, "/tmp/my");
-
-        FSDataOutputStream os = fs.create(file);
-
-        os.close();
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                fs.setOwner(file, "aUser", null);
-
-                return null;
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: grpName");
-    }
-
-    /** @throws Exception If failed. */
-    public void testSetOwner() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path file = new Path(fsHome, "/tmp/my");
-
-        FSDataOutputStream os = fs.create(file);
-
-        os.close();
-
-        assertEquals(getClientFsUser(), fs.getFileStatus(file).getOwner());
-
-        fs.setOwner(file, "aUser", "aGroup");
-
-        assertEquals("aUser", fs.getFileStatus(file).getOwner());
-        assertEquals("aGroup", fs.getFileStatus(file).getGroup());
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testSetTimes() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path file = new Path(fsHome, "/heartbeat");
-
-        fs.create(file).close();
-
-        FileStatus status = fs.getFileStatus(file);
-
-        assertTrue(status.getAccessTime() > 0);
-        assertTrue(status.getModificationTime() > 0);
-
-        long mtime = System.currentTimeMillis() - 5000;
-        long atime = System.currentTimeMillis() - 4000;
-
-        fs.setTimes(file, mtime, atime);
-
-        status = fs.getFileStatus(file);
-
-        assertEquals(mtime, status.getModificationTime());
-        assertEquals(atime, status.getAccessTime());
-
-        mtime -= 5000;
-
-        fs.setTimes(file, mtime, -1);
-
-        status = fs.getFileStatus(file);
-
-        assertEquals(mtime, status.getModificationTime());
-        assertEquals(atime, status.getAccessTime());
-
-        atime -= 5000;
-
-        fs.setTimes(file, -1, atime);
-
-        status = fs.getFileStatus(file);
-
-        assertEquals(mtime, status.getModificationTime());
-        assertEquals(atime, status.getAccessTime());
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testSetOwnerIfOutputStreamIsNotClosed() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path file = new Path(fsHome, "myFile");
-
-        FSDataOutputStream os = fs.create(file);
-
-        fs.setOwner(file, "aUser", "aGroup");
-
-        os.close();
-
-        assertEquals("aUser", fs.getFileStatus(file).getOwner());
-        assertEquals("aGroup", fs.getFileStatus(file).getGroup());
-    }
-
-    /** @throws Exception If failed. */
-    public void testSetOwnerCheckNonRecursiveness() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path file = new Path(fsHome, "/tmp/my");
-
-        FSDataOutputStream os = fs.create(file);
-
-        os.close();
-
-        Path tmpDir = new Path(fsHome, "/tmp");
-
-        fs.setOwner(file, "fUser", "fGroup");
-        fs.setOwner(tmpDir, "dUser", "dGroup");
-
-        assertEquals("dUser", fs.getFileStatus(tmpDir).getOwner());
-        assertEquals("dGroup", fs.getFileStatus(tmpDir).getGroup());
-
-        assertEquals("fUser", fs.getFileStatus(file).getOwner());
-        assertEquals("fGroup", fs.getFileStatus(file).getGroup());
-    }
-
-    /** @throws Exception If failed. */
-    public void testOpenCheckParametersPathIsNull() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.open(null, 1024);
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
-    }
-
-    /** @throws Exception If failed. */
-    public void testOpenNoSuchPath() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path file = new Path(fsHome, "someFile");
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.open(file, 1024);
-            }
-        }, FileNotFoundException.class, null);
-    }
-
-    /** @throws Exception If failed. */
-    public void testOpenIfPathIsAlreadyOpened() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path file = new Path(fsHome, "someFile");
-
-        FSDataOutputStream os = fs.create(file);
-
-        os.close();
-
-        FSDataInputStream is1 = fs.open(file);
-        FSDataInputStream is2 = fs.open(file);
-
-        is1.close();
-        is2.close();
-    }
-
-    /** @throws Exception If failed. */
-    public void testOpen() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path file = new Path(fsHome, "someFile");
-
-        int cnt = 2 * 1024;
-
-        try (FSDataOutputStream out = fs.create(file, true, 1024)) {
-
-            for (long i = 0; i < cnt; i++)
-                out.writeLong(i);
-        }
-
-        assertEquals(getClientFsUser(), fs.getFileStatus(file).getOwner());
-
-        try (FSDataInputStream in = fs.open(file, 1024)) {
-
-            for (long i = 0; i < cnt; i++)
-                assertEquals(i, in.readLong());
-        }
-    }
-
-    /** @throws Exception If failed. */
-    public void testAppendCheckParametersPathIsNull() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.append(null);
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
-    }
-
-    /** @throws Exception If failed. */
-    public void testAppendIfPathPointsToDirectory() throws Exception {
-        final Path fsHome = new Path(primaryFsUri);
-        final Path dir = new Path(fsHome, "/tmp");
-        Path file = new Path(dir, "my");
-
-        FSDataOutputStream os = fs.create(file);
-
-        os.close();
-
-        GridTestUtils.assertThrowsInherited(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.append(new Path(fsHome, dir), 1024);
-            }
-        }, IOException.class, null);
-    }
-
-    /** @throws Exception If failed. */
-    public void testAppendIfFileIsAlreadyBeingOpenedToWrite() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path file = new Path(fsHome, "someFile");
-
-        FSDataOutputStream os = fs.create(file);
-
-        os.close();
-
-        FSDataOutputStream appendOs = fs.append(file);
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.append(file);
-            }
-        }, IOException.class, null);
-
-        appendOs.close();
-    }
-
-    /** @throws Exception If failed. */
-    public void testAppend() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path file = new Path(fsHome, "someFile");
-
-        int cnt = 1024;
-
-        FSDataOutputStream out = fs.create(file, true, 1024);
-
-        for (int i = 0; i < cnt; i++)
-            out.writeLong(i);
-
-        out.close();
-
-        out = fs.append(file);
-
-        for (int i = cnt; i < cnt * 2; i++)
-            out.writeLong(i);
-
-        out.close();
-
-        FSDataInputStream in = fs.open(file, 1024);
-
-        for (int i = 0; i < cnt * 2; i++)
-            assertEquals(i, in.readLong());
-
-        in.close();
-    }
-
-    /** @throws Exception If failed. */
-    public void testRenameCheckParametersSrcPathIsNull() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path file = new Path(fsHome, "someFile");
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.rename(null, file);
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: src");
-    }
-
-    /** @throws Exception If failed. */
-    public void testRenameCheckParametersDstPathIsNull() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path file = new Path(fsHome, "someFile");
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.rename(file, null);
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: dst");
-    }
-
-    /** @throws Exception If failed. */
-    public void testRenameIfSrcPathDoesNotExist() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path srcFile = new Path(fsHome, "srcFile");
-        Path dstFile = new Path(fsHome, "dstFile");
-
-        assertPathDoesNotExist(fs, srcFile);
-
-        assertFalse(fs.rename(srcFile, dstFile));
-
-        assertPathDoesNotExist(fs, dstFile);
-    }
-
-    /** @throws Exception If failed. */
-    public void testRenameIfSrcPathIsAlreadyBeingOpenedToWrite() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path srcFile = new Path(fsHome, "srcFile");
-        Path dstFile = new Path(fsHome, "dstFile");
-
-        FSDataOutputStream os = fs.create(srcFile);
-
-        os.close();
-
-        os = fs.append(srcFile);
-
-        assertTrue(fs.rename(srcFile, dstFile));
-
-        assertPathExists(fs, dstFile);
-
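-        // Data written through the stream opened before rename must land in the renamed file.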
-        String testStr = "Test";
-
-        try {
-            os.writeBytes(testStr);
-        }
-        finally {
-            os.close();
-        }
-
-        try (FSDataInputStream is = fs.open(dstFile)) {
-            byte[] buf = new byte[testStr.getBytes().length];
-
-            is.readFully(buf);
-
-            assertEquals(testStr, new String(buf));
-        }
-    }
-
-    /** @throws Exception If failed. */
-    public void testRenameFileIfDstPathExists() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path srcFile = new Path(fsHome, "srcFile");
-        Path dstFile = new Path(fsHome, "dstFile");
-
-        FSDataOutputStream os = fs.create(srcFile);
-
-        os.close();
-
-        os = fs.create(dstFile);
-
-        os.close();
-
-        assertFalse(fs.rename(srcFile, dstFile));
-
-        assertPathExists(fs, srcFile);
-        assertPathExists(fs, dstFile);
-    }
-
-    /** @throws Exception If failed. */
-    public void testRenameFile() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path srcFile = new Path(fsHome, "/tmp/srcFile");
-        Path dstFile = new Path(fsHome, "/tmp/dstFile");
-
-        FSDataOutputStream os = fs.create(srcFile);
-
-        os.close();
-
-        assertTrue(fs.rename(srcFile, dstFile));
-
-        assertPathDoesNotExist(fs, srcFile);
-        assertPathExists(fs, dstFile);
-    }
-
-    /** @throws Exception If failed. */
-    public void testRenameIfSrcPathIsAlreadyBeingOpenedToRead() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path srcFile = new Path(fsHome, "srcFile");
-        Path dstFile = new Path(fsHome, "dstFile");
-
-        FSDataOutputStream os = fs.create(srcFile);
-
-        int cnt = 1024;
-
-        for (int i = 0; i < cnt; i++)
-            os.writeInt(i);
-
-        os.close();
-
-        FSDataInputStream is = fs.open(srcFile);
-
-        for (int i = 0; i < cnt; i++) {
-            if (i == 100)
-                // Rename file during the read process.
-                assertTrue(fs.rename(srcFile, dstFile));
-
-            assertEquals(i, is.readInt());
-        }
-
-        assertPathDoesNotExist(fs, srcFile);
-        assertPathExists(fs, dstFile);
-
-        os.close();
-        is.close();
-    }
-
-    /** @throws Exception If failed. */
-    public void testRenameDirectoryIfDstPathExists() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path srcDir = new Path(fsHome, "/tmp/");
-        Path dstDir = new Path(fsHome, "/tmpNew/");
-
-        FSDataOutputStream os = fs.create(new Path(srcDir, "file1"));
-
-        os.close();
-
-        os = fs.create(new Path(dstDir, "file2"));
-
-        os.close();
-
-        assertTrue("Rename succeeded [srcDir=" + srcDir + ", dstDir=" + dstDir + ']', fs.rename(srcDir, dstDir));
-
-        assertPathExists(fs, dstDir);
-        assertPathExists(fs, new Path(fsHome, "/tmpNew/tmp"));
-        assertPathExists(fs, new Path(fsHome, "/tmpNew/tmp/file1"));
-    }
-
-    /** @throws Exception If failed. */
-    public void testRenameDirectory() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path dir = new Path(fsHome, "/tmp/");
-        Path newDir = new Path(fsHome, "/tmpNew/");
-
-        FSDataOutputStream os = fs.create(new Path(dir, "myFile"));
-
-        os.close();
-
-        assertTrue("Rename failed [dir=" + dir + ", newDir=" + newDir + ']', fs.rename(dir, newDir));
-
-        assertPathDoesNotExist(fs, dir);
-        assertPathExists(fs, newDir);
-    }
-
-    /** @throws Exception If failed. */
-    public void testListStatusIfPathIsNull() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.listStatus((Path)null);
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
-    }
-
-    /** @throws Exception If failed. */
-    public void testListStatusIfPathDoesNotExist() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.listStatus(new Path("/tmp/some/dir"));
-            }
-        }, FileNotFoundException.class, null);
-    }
-
-    /**
-     * Test directory listing.
-     *
-     * @throws Exception If failed.
-     */
-    public void testListStatus() throws Exception {
-        Path igfsHome = new Path(PRIMARY_URI);
-
-        // Test listing of an empty directory.
-        Path dir = new Path(igfsHome, "dir");
-
-        assert fs.mkdirs(dir);
-
-        FileStatus[] list = fs.listStatus(dir);
-
-        assert list.length == 0;
-
-        // Test listing of a not empty directory.
-        Path subDir = new Path(dir, "subDir");
-
-        assert fs.mkdirs(subDir);
-
-        Path file = new Path(dir, "file");
-
-        FSDataOutputStream fos = fs.create(file);
-
-        fos.close();
-
-        list = fs.listStatus(dir);
-
-        assert list.length == 2;
-
-        String listRes1 = list[0].getPath().getName();
-        String listRes2 = list[1].getPath().getName();
-
-        assert "subDir".equals(listRes1) && "file".equals(listRes2) || "subDir".equals(listRes2) &&
-            "file".equals(listRes1);
-
-        // Test listing of a file.
-        list = fs.listStatus(file);
-
-        assert list.length == 1;
-
-        assert "file".equals(list[0].getPath().getName());
-    }
-
-    /** @throws Exception If failed. */
-    public void testSetWorkingDirectoryIfPathIsNull() throws Exception {
-        fs.setWorkingDirectory(null);
-
-        Path file = new Path("file");
-
-        FSDataOutputStream os = fs.create(file);
-        os.close();
-
-        String path = fs.getFileStatus(file).getPath().toString();
-
-        assertTrue(path.endsWith("/user/" + getClientFsUser() + "/file"));
-    }
-
-    /** @throws Exception If failed. */
-    public void testSetWorkingDirectoryIfPathDoesNotExist() throws Exception {
-        // Should not throw any exceptions.
-        fs.setWorkingDirectory(new Path("/someDir"));
-    }
-
-    /** @throws Exception If failed. */
-    public void testSetWorkingDirectory() throws Exception {
-        Path dir = new Path("/tmp/nested/dir");
-        Path file = new Path("file");
-
-        fs.mkdirs(dir);
-
-        fs.setWorkingDirectory(dir);
-
-        FSDataOutputStream os = fs.create(file);
-        os.close();
-
-        String filePath = fs.getFileStatus(new Path(dir, file)).getPath().toString();
-
-        assertTrue(filePath.contains("/tmp/nested/dir/file"));
-    }
-
-    /** @throws Exception If failed. */
-    public void testGetWorkingDirectoryIfDefault() throws Exception {
-        String path = fs.getWorkingDirectory().toString();
-
-        assertTrue(path.endsWith("/user/" + getClientFsUser()));
-    }
-
-    /** @throws Exception If failed. */
-    public void testGetWorkingDirectory() throws Exception {
-        Path dir = new Path("/tmp/some/dir");
-
-        fs.mkdirs(dir);
-
-        fs.setWorkingDirectory(dir);
-
-        String path = fs.getWorkingDirectory().toString();
-
-        assertTrue(path.endsWith("/tmp/some/dir"));
-    }
-
-    /** @throws Exception If failed. */
-    public void testMkdirsIfPathIsNull() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.mkdirs(null);
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
-    }
-
-    /** @throws Exception If failed. */
-    public void testMkdirsIfPermissionIsNull() throws Exception {
-        Path dir = new Path("/tmp");
-
-        assertTrue(fs.mkdirs(dir, null));
-
-        assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir).getPermission());
-    }
-
-    /** @throws Exception If failed. */
-    @SuppressWarnings("OctalInteger")
-    public void testMkdirs() throws Exception {
-        Path fsHome = new Path(PRIMARY_URI);
-        final Path dir = new Path(fsHome, "/tmp/staging");
-        final Path nestedDir = new Path(dir, "nested");
-
-        final FsPermission dirPerm = FsPermission.createImmutable((short)0700);
-        final FsPermission nestedDirPerm = FsPermission.createImmutable((short)111);
-
-        assertTrue(fs.mkdirs(dir, dirPerm));
-        assertTrue(fs.mkdirs(nestedDir, nestedDirPerm));
-
-        assertEquals(dirPerm, fs.getFileStatus(dir).getPermission());
-        assertEquals(nestedDirPerm, fs.getFileStatus(nestedDir).getPermission());
-
-        assertEquals(getClientFsUser(), fs.getFileStatus(dir).getOwner());
-        assertEquals(getClientFsUser(), fs.getFileStatus(nestedDir).getOwner());
-    }
-
-    /** @throws Exception If failed. */
-    public void testGetFileStatusIfPathIsNull() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.getFileStatus(null);
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
-    }
-
-    /** @throws Exception If failed. */
-    public void testGetFileStatusIfPathDoesNotExist() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.getFileStatus(new Path("someDir"));
-            }
-        }, FileNotFoundException.class, "File not found: someDir");
-    }
-
-    /** @throws Exception If failed. */
-    public void testGetFileBlockLocationsIfFileStatusIsNull() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                // Argument is checked by Hadoop.
-                return fs.getFileBlockLocations((Path)null, 1, 2);
-            }
-        }, NullPointerException.class, null);
-    }
-
-    /** @throws Exception If failed. */
-    public void testGetFileBlockLocationsIfFileStatusReferenceNotExistingPath() throws Exception {
-        Path path = new Path("someFile");
-
-        fs.create(path).close();
-
-        final FileStatus status = fs.getFileStatus(path);
-
-        fs.delete(path, true);
-
-        BlockLocation[] locations = fs.getFileBlockLocations(status, 1, 2);
-
-        assertEquals(0, locations.length);
-    }
-
-    /** @throws Exception If failed. */
-    public void testGetFileBlockLocations() throws Exception {
-        Path igfsHome = new Path(PRIMARY_URI);
-
-        Path file = new Path(igfsHome, "someFile");
-
-        try (OutputStream out = new BufferedOutputStream(fs.create(file, true, 1024 * 1024))) {
-            byte[] data = new byte[128 * 1024];
-
-            for (int i = 0; i < 100; i++)
-                out.write(data);
-
-            out.flush();
-        }
-
-        try (FSDataInputStream in = fs.open(file, 1024 * 1024)) {
-            byte[] data = new byte[128 * 1024];
-
-            int read;
-
-            do {
-                read = in.read(data);
-            }
-            while (read > 0);
-        }
-
-        FileStatus status = fs.getFileStatus(file);
-
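-        // Affinity group spans 128 blocks of 512K each, i.e. 64M per group.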
-        int grpLen = 128 * 512 * 1024;
-
-        int grpCnt = (int)((status.getLen() + grpLen - 1) / grpLen);
-
-        BlockLocation[] locations = fs.getFileBlockLocations(status, 0, status.getLen());
-
-        assertEquals(grpCnt, locations.length);
-    }
-
-    /** @throws Exception If failed. */
-    @SuppressWarnings("deprecation")
-    public void testGetDefaultBlockSize() throws Exception {
-        assertEquals(1L << 26, fs.getDefaultBlockSize());
-    }
-
-    /** @throws Exception If failed. */
-    public void testZeroReplicationFactor() throws Exception {
-        // This test only makes sense in PRIMARY mode.
-        if (mode == PRIMARY) {
-            Path igfsHome = new Path(PRIMARY_URI);
-
-            Path file = new Path(igfsHome, "someFile");
-
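-            // With zero replication factor each block is expected to reside on exactly one node.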
-            try (FSDataOutputStream out = fs.create(file, (short)0)) {
-                out.write(new byte[1024 * 1024]);
-            }
-
-            IgniteFileSystem igfs = grid(0).fileSystem("igfs");
-
-            IgfsPath filePath = new IgfsPath("/someFile");
-
-            IgfsFile fileInfo = igfs.info(filePath);
-
-            awaitPartitionMapExchange();
-
-            Collection<IgfsBlockLocation> locations = igfs.affinity(filePath, 0, fileInfo.length());
-
-            assertEquals(1, locations.size());
-
-            IgfsBlockLocation location = F.first(locations);
-
-            assertEquals(1, location.nodeIds().size());
-        }
-    }
-
-    /**
-     * Ensure that when running in multithreaded mode only one create() operation succeeds.
-     *
-     * @throws Exception If failed.
-     */
-    public void testMultithreadedCreate() throws Exception {
-        Path dir = new Path(new Path(PRIMARY_URI), "/dir");
-
-        assert fs.mkdirs(dir);
-
-        final Path file = new Path(dir, "file");
-
-        fs.create(file).close();
-
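-        // Each thread writes a buffer filled with its own index, so the winner can be identified.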
-        final AtomicInteger cnt = new AtomicInteger();
-
-        final Collection<Integer> errs = new GridConcurrentHashSet<>(THREAD_CNT, 1.0f, THREAD_CNT);
-
-        final AtomicBoolean err = new AtomicBoolean();
-
-        multithreaded(new Runnable() {
-            @Override public void run() {
-                int idx = cnt.getAndIncrement();
-
-                byte[] data = new byte[256];
-
-                Arrays.fill(data, (byte)idx);
-
-                FSDataOutputStream os = null;
-
-                try {
-                    os = fs.create(file, true);
-                }
-                catch (IOException ignore) {
-                    errs.add(idx);
-                }
-
-                U.awaitQuiet(barrier);
-
-                try {
-                    if (os != null)
-                        os.write(data);
-                }
-                catch (IOException ignore) {
-                    err.set(true);
-                }
-                finally {
-                    U.closeQuiet(os);
-                }
-            }
-        }, THREAD_CNT);
-
-        assert !err.get();
-
-        // Only one thread could obtain the write lock on the file.
-        assert errs.size() == THREAD_CNT - 1;
-
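-        // Find the single thread whose create() succeeded.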
-        int idx = -1;
-
-        for (int i = 0; i < THREAD_CNT; i++) {
-            if (!errs.remove(i)) {
-                idx = i;
-
-                break;
-            }
-        }
-
-        byte[] expData = new byte[256];
-
-        Arrays.fill(expData, (byte)idx);
-
-        FSDataInputStream is = fs.open(file);
-
-        byte[] data = new byte[256];
-
-        is.readFully(data);
-
-        is.close();
-
-        assert Arrays.equals(expData, data) : "Expected=" + Arrays.toString(expData) + ", actual=" +
-            Arrays.toString(data);
-    }
-
-    /**
-     * Ensure that when running in multithreaded mode only one append() operation succeeds.
-     *
-     * @throws Exception If failed.
-     */
-    public void testMultithreadedAppend() throws Exception {
-        Path dir = new Path(new Path(PRIMARY_URI), "/dir");
-
-        assert fs.mkdirs(dir);
-
-        final Path file = new Path(dir, "file");
-
-        fs.create(file).close();
-
-        final AtomicInteger cnt = new AtomicInteger();
-
-        final Collection<Integer> errs = new GridConcurrentHashSet<>(THREAD_CNT, 1.0f, THREAD_CNT);
-
-        final AtomicBoolean err = new AtomicBoolean();
-
-        multithreaded(new Runnable() {
-            @Override public void run() {
-                int idx = cnt.getAndIncrement();
-
-                byte[] data = new byte[256];
-
-                Arrays.fill(data, (byte)idx);
-
-                U.awaitQuiet(barrier);
-
-                FSDataOutputStream os = null;
-
-                try {
-                    os = fs.append(file);
-                }
-                catch (IOException ignore) {
-                    errs.add(idx);
-                }
-
-                U.awaitQuiet(barrier);
-
-                try {
-                    if (os != null)
-                        os.write(data);
-                }
-                catch (IOException ignore) {
-                    err.set(true);
-                }
-                finally {
-                    U.closeQuiet(os);
-                }
-            }
-        }, THREAD_CNT);
-
-        assert !err.get();
-
-        // Only one thread could obtain the write lock on the file.
-        assert errs.size() == THREAD_CNT - 1;
-
-        int idx = -1;
-
-        for (int i = 0; i < THREAD_CNT; i++) {
-            if (!errs.remove(i)) {
-                idx = i;
-
-                break;
-            }
-        }
-
-        byte[] expData = new byte[256];
-
-        Arrays.fill(expData, (byte)idx);
-
-        FSDataInputStream is = fs.open(file);
-
-        byte[] data = new byte[256];
-
-        is.readFully(data);
-
-        is.close();
-
-        assert Arrays.equals(expData, data);
-    }
-
-    /**
-     * Test concurrent reads within the file.
-     *
-     * @throws Exception If failed.
-     */
-    public void testMultithreadedOpen() throws Exception {
-        final byte[] dataChunk = new byte[256];
-
-        for (int i = 0; i < dataChunk.length; i++)
-            dataChunk[i] = (byte)i;
-
-        Path dir = new Path(new Path(PRIMARY_URI), "/dir");
-
-        assert fs.mkdirs(dir);
-
-        final Path file = new Path(dir, "file");
-
-        FSDataOutputStream os = fs.create(file);
-
-        // Write 256 * 2048 = 512Kb of data.
-        for (int i = 0; i < 2048; i++)
-            os.write(dataChunk);
-
-        os.close();
-
-        final AtomicBoolean err = new AtomicBoolean();
-
-        multithreaded(new Runnable() {
-            @Override public void run() {
-                FSDataInputStream is = null;
-
-                try {
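-                    // Each thread starts reading at a random 256-byte chunk.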
-                    int pos = ThreadLocalRandom8.current().nextInt(2048);
-
-                    try {
-                        is = fs.open(file);
-                    }
-                    finally {
-                        U.awaitQuiet(barrier);
-                    }
-
-                    is.seek(256 * pos);
-
-                    byte[] buf = new byte[256];
-
-                    for (int i = pos; i < 2048; i++) {
-                        // First perform normal read.
-                        int read = is.read(buf);
-
-                        assert read == 256;
-
-                        assert Arrays.equals(dataChunk, buf);
-                    }
-
-                    int res = is.read(buf);
-
-                    assert res == -1;
-                }
-                catch (IOException ignore) {
-                    err.set(true);
-                }
-                finally {
-                    U.closeQuiet(is);
-                }
-            }
-        }, THREAD_CNT);
-
-        assert !err.get();
-    }
-
-    /**
-     * Test concurrent creation of multiple directories.
-     *
-     * @throws Exception If failed.
-     */
-    public void testMultithreadedMkdirs() throws Exception {
-        final Path dir = new Path(new Path(PRIMARY_URI), "/dir");
-
-        assert fs.mkdirs(dir);
-
-        final int depth = 3;
-        final int entryCnt = 5;
-
-        final AtomicReference<IOException> err = new AtomicReference();
-
-        multithreaded(new Runnable() {
-            @Override public void run() {
-                Deque<IgniteBiTuple<Integer, Path>> queue = new ArrayDeque<>();
-
-                queue.add(F.t(0, dir));
-
-                U.awaitQuiet(barrier);
-
-                while (!queue.isEmpty()) {
-                    IgniteBiTuple<Integer, Path> t = queue.pollFirst();
-
-                    int curDepth = t.getKey();
-                    Path curPath = t.getValue();
-
-                    if (curDepth <= depth) {
-                        int newDepth = curDepth + 1;
-
-                        // Create directories.
-                        for (int i = 0; i < entryCnt; i++) {
-                            Path subDir = new Path(curPath, "dir-" + newDepth + "-" + i);
-
-                            try {
-                                if (fs.mkdirs(subDir))
-                                    queue.addLast(F.t(newDepth, subDir));
-                            }
-                            catch (IOException e) {
-                                err.compareAndSet(null, e);
-                            }
-                        }
-                    }
-                }
-            }
-        }, THREAD_CNT);
-
-        // Ensure there were no errors.
-        assert err.get() == null : err.get();
-
-        // Ensure correct folder structure.
-        Deque<IgniteBiTuple<Integer, Path>> queue = new ArrayDeque<>();
-
-        queue.add(F.t(0, dir));
-
-        while (!queue.isEmpty()) {
-            IgniteBiTuple<Integer, Path> t = queue.pollFirst();
-
-            int curDepth = t.getKey();
-            Path curPath = t.getValue();
-
-            if (curDepth <= depth) {
-                int newDepth = curDepth + 1;
-
-                // Verify directories.
-                for (int i = 0; i < entryCnt; i++) {
-                    Path subDir = new Path(curPath, "dir-" + newDepth + "-" + i);
-
-                    assert fs.exists(subDir) : "Expected directory doesn't exist: " + subDir;
-
-                    queue.add(F.t(newDepth, subDir));
-                }
-            }
-        }
-    }
-
-    /**
-     * Test concurrent deletion of the same directory with a nested structure.
-     *
-     * @throws Exception If failed.
-     */
-    @SuppressWarnings("TooBroadScope")
-    public void testMultithreadedDelete() throws Exception {
-        final Path dir = new Path(new Path(PRIMARY_URI), "/dir");
-
-        assert fs.mkdirs(dir);
-
-        int depth = 3;
-        int entryCnt = 5;
-
-        Deque<IgniteBiTuple<Integer, Path>> queue = new ArrayDeque<>();
-
-        queue.add(F.t(0, dir));
-
-        while (!queue.isEmpty()) {
-            IgniteBiTuple<Integer, Path> t = queue.pollFirst();
-
-            int curDepth = t.getKey();
-            Path curPath = t.getValue();
-
-            if (curDepth < depth) {
-                int newDepth = curDepth + 1;
-
-                // Create directories.
-                for (int i = 0; i < entryCnt; i++) {
-                    Path subDir = new Path(curPath, "dir-" + newDepth + "-" + i);
-
-                    fs.mkdirs(subDir);
-
-                    queue.addLast(F.t(newDepth, subDir));
-                }
-            }
-            else {
-                // Create files.
-                for (int i = 0; i < entryCnt; i++) {
-                    Path file = new Path(curPath, "file " + i);
-
-                    fs.create(file).close();
-                }
-            }
-        }
-
-        final AtomicBoolean err = new AtomicBoolean();
-
-        multithreaded(new Runnable() {
-            @Override public void run() {
-                try {
-                    U.awaitQuiet(barrier);
-
-                    fs.delete(dir, true);
-                }
-                catch (IOException ignore) {
-                    err.set(true);
-                }
-            }
-        }, THREAD_CNT);
-
-        // Ensure there were no errors.
-        assert !err.get();
-
-        // Ensure the directory was actually deleted.
-
-        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
-            @Override public boolean apply() {
-                try {
-                    return !fs.exists(dir);
-                }
-                catch (IOException e) {
-                    throw new AssertionError(e);
-                }
-            }
-        }, 5000L);
-    }
-
-    /** @throws Exception If failed. */
-    public void testConsistency() throws Exception {
-        // Default buffer values.
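-        // -1 means the default buffer size is used.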
-        checkConsistency(-1, 1, -1, -1, 1, -1);
-        checkConsistency(-1, 10, -1, -1, 10, -1);
-        checkConsistency(-1, 100, -1, -1, 100, -1);
-        checkConsistency(-1, 1000, -1, -1, 1000, -1);
-        checkConsistency(-1, 10000, -1, -1, 10000, -1);
-        checkConsistency(-1, 100000, -1, -1, 100000, -1);
-
-        checkConsistency(65 * 1024 + 13, 100000, -1, -1, 100000, -1);
-
-        checkConsistency(-1, 100000, 2 * 4 * 1024 + 17, -1, 100000, -1);
-
-        checkConsistency(-1, 100000, -1, 65 * 1024 + 13, 100000, -1);
-
-        checkConsistency(-1, 100000, -1, -1, 100000, 2 * 4 * 1024 + 17);
-
-        checkConsistency(65 * 1024 + 13, 100000, 2 * 4 * 1024 + 13, 65 * 1024 + 149, 100000, 2 * 4 * 1024 + 157);
-    }
-
-    /**
-     * Verifies that client reconnects after connection to the server has been lost.
-     *
-     * @throws Exception If error occurs.
-     */
-    public void testClientReconnect() throws Exception {
-        Path filePath = new Path(PRIMARY_URI, "file1");
-
-        final FSDataOutputStream s = fs.create(filePath); // Open the stream before stopping IGFS.
-
-        try {
-            G.stopAll(true); // Stop the server.
-
-            startNodes(); // Start server again.
-
-            // Check that client is again operational.
-            assertTrue(fs.mkdirs(new Path(PRIMARY_URI, "dir1/dir2")));
-
-            // However, streams opened before the disconnect should not be valid.
-            GridTestUtils.assertThrows(log, new Callable<Object>() {
-                @Nullable @Override public Object call() throws Exception {
-                    s.write("test".getBytes());
-
-                    s.flush(); // Flush data to the broken output stream.
-
-                    return null;
-                }
-            }, IOException.class, null);
-
-            assertFalse(fs.exists(filePath));
-        }
-        finally {
-            U.closeQuiet(s); // Safety.
-        }
-    }
-
-    /**
-     * Verifies that client reconnects after connection to the server has been lost (multithreaded mode).
-     *
-     * @throws Exception If error occurs.
-     */
-    public void testClientReconnectMultithreaded() throws Exception {
-        final ConcurrentLinkedQueue<FileSystem> q = new ConcurrentLinkedQueue<>();
-
-        Configuration cfg = new Configuration();
-
-        for (Map.Entry<String, String> entry : primaryFsCfg)
-            cfg.set(entry.getKey(), entry.getValue());
-
-        cfg.setBoolean("fs.igfs.impl.disable.cache", true);
-
-        final int nClients = 1;
-
-        // Initialize clients.
-        for (int i = 0; i < nClients; i++)
-            q.add(FileSystem.get(primaryFsUri, cfg));
-
-        G.stopAll(true); // Stop the server.
-
-        startNodes(); // Start server again.
-
-        GridTestUtils.runMultiThreaded(new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                FileSystem fs = q.poll();
-
-                try {
-                    // Check that client is again operational.
-                    assertTrue(fs.mkdirs(new Path("/" + Thread.currentThread().getName())));
-
-                    return true;
-                }
-                finally {
-                    U.closeQuiet(fs);
-                }
-            }
-        }, nClients, "test-client");
-    }
-
-    /**
-     * Checks consistency of create --> open --> append --> open operations with different buffer sizes.
-     *
-     * @param createBufSize Buffer size used for file creation.
-     * @param writeCntsInCreate Count of times to write in file creation.
-     * @param openAfterCreateBufSize Buffer size used for file opening after creation.
-     * @param appendBufSize Buffer size used for file appending.
-     * @param writeCntsInAppend Count of times to write in file appending.
-     * @param openAfterAppendBufSize Buffer size used for file opening after appending.
-     * @throws Exception If failed.
-     */
-    private void checkConsistency(int createBufSize, int writeCntsInCreate, int openAfterCreateBufSize,
-        int appendBufSize, int writeCntsInAppend, int openAfterAppendBufSize) throws Exception {
-        final Path igfsHome = new Path(PRIMARY_URI);
-
-        Path file = new Path(igfsHome, "/someDir/someInnerDir/someFile");
-
-        FSDataOutputStream os = fs.create(file, true, createBufSize);
-
-        for (int i = 0; i < writeCntsInCreate; i++)
-            os.writeInt(i);
-
-        os.close();
-
-        FSDataInputStream is = fs.open(file, openAfterCreateBufSize);
-
-        for (int i = 0; i < writeCntsInCreate; i++)
-            assertEquals(i, is.readInt());
-
-        is.close();
-
-        os = fs.append(file, appendBufSize);
-
-        for (int i = writeCntsInCreate; i < writeCntsInCreate + writeCntsInAppend; i++)
-            os.writeInt(i);
-
-        os.close();
-
-        is = fs.open(file, openAfterAppendBufSize);
-
-        for (int i = 0; i < writeCntsInCreate + writeCntsInAppend; i++)
-            assertEquals(i, is.readInt());
-
-        is.close();
-    }
-
-    /**
-     * Gets instance of Hadoop local file system.
-     *
-     * @param home File system home.
-     * @return File system.
-     * @throws IOException If failed.
-     */
-    private FileSystem local(Path home) throws IOException {
-        Configuration cfg = new Configuration();
-
-        cfg.addResource(U.resolveIgniteUrl(HADOOP_FS_CFG));
-
-        return FileSystem.get(home.toUri(), cfg);
-    }
-
-    /**
-     * Copy files from one FS to another.
-     *
-     * @param msg Info message to display after copying finishes.
-     * @param srcFs Source file system.
-     * @param src Source path to copy from.
-     * @param destFs Destination file system.
-     * @param dest Destination path to copy to.
-     * @throws IOException If failed.
-     */
-    private void copy(String msg, FileSystem srcFs, Path src, FileSystem destFs, Path dest) throws IOException {
-        assert destFs.delete(dest, true) || !destFs.exists(dest) : "Failed to remove: " + dest;
-
-        destFs.mkdirs(dest);
-
-        Configuration conf = new Configuration(true);
-
-        long time = System.currentTimeMillis();
-
-        FileUtil.copy(srcFs, src, destFs, dest, false, true, conf);
-
-        time = System.currentTimeMillis() - time;
-
-        info("Copying finished, " + msg + " [time=" + time + "ms, src=" + src + ", dest=" + dest + ']');
-    }
-
-    /**
-     * Compare content of two folders.
-     *
-     * @param cfg Paths configuration to compare.
-     * @throws IOException If failed.
-     */
-    @SuppressWarnings("deprecation")
-    private void compareContent(Config cfg) throws IOException {
-        Deque<Config> queue = new LinkedList<>();
-
-        queue.add(cfg);
-
-        for (Config c = queue.poll(); c != null; c = queue.poll()) {
-            boolean exists;
-
-            assertEquals("Check existence [src=" + c.src + ", dest=" + c.dest + ']',
-                exists = c.srcFs.exists(c.src), c.destFs.exists(c.dest));
-
-            assertEquals("Check types (files?) [src=" + c.src + ", dest=" + c.dest + ']',
-                c.srcFs.isFile(c.src), c.destFs.isFile(c.dest));
-
-            if (exists) {
-                ContentSummary srcSummary = c.srcFs.getContentSummary(c.src);
-                ContentSummary dstSummary = c.destFs.getContentSummary(c.dest);
-
-                assertEquals("Directories number comparison failed",
-                    srcSummary.getDirectoryCount(), dstSummary.getDirectoryCount());
-
-                assertEquals("Files number comparison failed",
-                    srcSummary.getFileCount(), dstSummary.getFileCount());
-
-                assertEquals("Space consumed comparison failed",
-                    srcSummary.getSpaceConsumed(), dstSummary.getSpaceConsumed());
-
-                assertEquals("Length comparison failed",
-                    srcSummary.getLength(), dstSummary.getLength());
-
-                // Intentionally skipping quotas checks as they can vary.
-            }
-            else {
-                assertContentSummaryFails(c.srcFs, c.src);
-                assertContentSummaryFails(c.destFs, c.dest);
-            }
-
-            if (!exists)
-                continue;
-
-            FileStatus[] srcSt = c.srcFs.listStatus(c.src);
-            FileStatus[] destSt = c.destFs.listStatus(c.dest);
-
-            assert srcSt != null && destSt != null : "Both not null" +
-                " [srcSt=" + Arrays.toString(srcSt) + ", destSt=" + Arrays.toString(destSt) + ']';
-
-            assertEquals("Check listing [src=" + c.src + ", dest=" + c.dest + ']', srcSt.length, destSt.length);
-
-            // Listing of the file returns the only element with this file.
-            if (srcSt.length == 1 && c.src.equals(srcSt[0].getPath())) {
-                assertEquals(c.dest, destSt[0].getPath());
-
-                assertTrue("Expects file [src=" + c.src + ", srcSt[0]=" + srcSt[0] + ']', !srcSt[0].isDir());
-                assertTrue("Expects file [dest=" + c.dest + ", destSt[0]=" + destSt[0] + ']', !destSt[0].isDir());
-
-                FSDataInputStream srcIn = null;
-                FSDataInputStream destIn = null;
-
-                try {
-                    srcIn = c.srcFs.open(c.src);
-                    destIn = c.destFs.open(c.dest);
-
-                    GridTestIoUtils.assertEqualStreams(srcIn, destIn, srcSt[0].getLen());
-                }
-                finally {
-                    U.closeQuiet(srcIn);
-                    U.closeQuiet(destIn);
-                }
-
-                continue; // Skip the following directories validations.
-            }
-
-            // Sort both arrays.
-            Arrays.sort(srcSt, STATUS_COMPARATOR);
-            Arrays.sort(destSt, STATUS_COMPARATOR);
-
-            for (int i = 0; i < srcSt.length; i++)
-                // Dig in deep to the last leaf, instead of collecting full tree in memory.
-                queue.addFirst(new Config(c.srcFs, srcSt[i].getPath(), c.destFs, destSt[i].getPath()));
-
-            // Add non-existent file to check in the current folder.
-            String rndFile = "Non-existent file #" + UUID.randomUUID().toString();
-
-            queue.addFirst(new Config(c.srcFs, new Path(c.src, rndFile), c.destFs, new Path(c.dest, rndFile)));
-        }
-    }
-
-    /**
-     * Test expected failures for 'close' operation.
-     *
-     * @param fs File system to test.
-     * @param msg Expected exception message.
-     */
-    public void assertCloseFails(final FileSystem fs, String msg) {
-        GridTestUtils.assertThrows(log, new Callable() {
-            @Override public Object call() throws Exception {
-                fs.close();
-
-                return null;
-            }
-        }, IOException.class, msg);
-    }
-
-    /**
-     * Test expected failures for 'get content summary' operation.
-     *
-     * @param fs File system to test.
-     * @param path Path to evaluate content summary for.
-     */
-    private void assertContentSummaryFails(final FileSystem fs, final Path path) {
-        GridTestUtils.assertThrows(log, new Callable<ContentSummary>() {
-            @Override public ContentSummary call() throws Exception {
-                return fs.getContentSummary(path);
-            }
-        }, FileNotFoundException.class, null);
-    }
-
-    /**
-     * Assert that a given path exists in a given FileSystem.
-     *
-     * @param fs FileSystem to check.
-     * @param p Path to check.
-     * @throws IOException if the path does not exist.
-     */
-    private void assertPathExists(FileSystem fs, Path p) throws IOException {
-        FileStatus fileStatus = fs.getFileStatus(p);
-
-        assertEquals(p, fileStatus.getPath());
-        assertNotSame(0, fileStatus.getModificationTime());
-    }
-
-    /**
-     * Check path does not exist in a given FileSystem.
-     *
-     * @param fs FileSystem to check.
-     * @param path Path to check.
-     */
-    private void assertPathDoesNotExist(final FileSystem fs, final Path path) {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.getFileStatus(path);
-            }
-        }, FileNotFoundException.class, null);
-    }
-
-    /** Helper class to encapsulate source and destination folders. */
-    @SuppressWarnings({"PublicInnerClass", "PublicField"})
-    public static final class Config {
-        /** Source file system. */
-        public final FileSystem srcFs;
-
-        /** Source path to work with. */
-        public final Path src;
-
-        /** Destination file system. */
-        public final FileSystem destFs;
-
-        /** Destination path to work with. */
-        public final Path dest;
-
-        /**
-         * Copying task configuration.
-         *
-         * @param srcFs Source file system.
-         * @param src Source path.
-         * @param destFs Destination file system.
-         * @param dest Destination path.
-         */
-        public Config(FileSystem srcFs, Path src, FileSystem destFs, Path dest) {
-            this.srcFs = srcFs;
-            this.src = src;
-            this.destFs = destFs;
-            this.dest = dest;
-        }
-    }
-
-    /**
-     * Convert path for exception message testing purposes.
-     *
-     * @param path Path.
-     * @return Converted path.
-     * @throws Exception If failed.
-     */
-    private Path convertPath(Path path) throws Exception {
-        if (mode != PROXY)
-            return path;
-        else {
-            URI secondaryUri = new URI(SECONDARY_URI);
-
-            URI pathUri = path.toUri();
-
-            return new Path(new URI(pathUri.getScheme() != null ? secondaryUri.getScheme() : null,
-                pathUri.getAuthority() != null ? secondaryUri.getAuthority() : null, pathUri.getPath(), null, null));
-        }
-    }
-
-    /**
-     * Create configuration for test.
-     *
-     * @param authority Authority.
-     * @param skipEmbed Whether to skip embedded mode.
-     * @param skipLocShmem Whether to skip local shmem mode.
-     * @return Configuration.
-     */
-    private static Configuration configuration(String authority, boolean skipEmbed, boolean skipLocShmem) {
-        Configuration cfg = new Configuration();
-
-        cfg.set("fs.defaultFS", "igfs://" + authority + "/");
-        cfg.set("fs.igfs.impl", IgniteHadoopFileSystem.class.getName());
-        cfg.set("fs.AbstractFileSystem.igfs.impl",
-            org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem.class.getName());
-
-        cfg.setBoolean("fs.igfs.impl.disable.cache", true);
-
-        if (skipEmbed)
-            cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_EMBED, authority), true);
-
-        if (skipLocShmem)
-            cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM, authority), true);
-
-        return cfg;
-    }
-}
\ No newline at end of file
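
The configuration() helper above maps the igfs:// scheme to both the V1 and V2
Hadoop file system implementations and disables client caching. A minimal usage
sketch (the authority below is an illustrative assumption, not a value from this
commit):

    Configuration cfg = configuration("igfs@127.0.0.1:10500", true, true);

    // FileSystem.get() consults fs.igfs.impl to instantiate IgniteHadoopFileSystem.
    FileSystem fs = FileSystem.get(URI.create("igfs://igfs@127.0.0.1:10500/"), cfg);

    try {
        fs.mkdirs(new Path("/exampleDir"));
    }
    finally {
        U.closeQuiet(fs);
    }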


[40/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopExternalProcessStarter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopExternalProcessStarter.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopExternalProcessStarter.java
new file mode 100644
index 0000000..32880e4
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopExternalProcessStarter.java
@@ -0,0 +1,301 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.child;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.net.URL;
+import java.util.UUID;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication.HadoopExternalCommunication;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteClosure;
+import org.apache.ignite.logger.log4j.Log4JLogger;
+import org.apache.ignite.marshaller.jdk.JdkMarshaller;
+
+/**
+ * Starter for Hadoop external task processes.
+ */
+public class HadoopExternalProcessStarter {
+    /** Path to Log4j configuration file. */
+    public static final String DFLT_LOG4J_CONFIG = "config/ignite-log4j.xml";
+
+    /** Arguments. */
+    private Args args;
+
+    /** System out. */
+    private OutputStream out;
+
+    /** System err. */
+    private OutputStream err;
+
+    /**
+     * @param args Parsed arguments.
+     */
+    public HadoopExternalProcessStarter(Args args) {
+        this.args = args;
+    }
+
+    /**
+     * @param cmdArgs Process arguments.
+     */
+    public static void main(String[] cmdArgs) {
+        try {
+            Args args = arguments(cmdArgs);
+
+            new HadoopExternalProcessStarter(args).run();
+        }
+        catch (Exception e) {
+            System.err.println("Failed");
+
+            System.err.println(e.getMessage());
+
+            e.printStackTrace(System.err);
+        }
+    }
+
+    /**
+     * Runs the process starter: initializes output streams, starts communication and the child runner.
+     *
+     * @throws Exception If failed.
+     */
+    public void run() throws Exception {
+        U.setWorkDirectory(args.workDir, U.getIgniteHome());
+
+        File outputDir = outputDirectory();
+
+        initializeStreams(outputDir);
+
+        ExecutorService msgExecSvc = Executors.newFixedThreadPool(
+            Integer.getInteger("MSG_THREAD_POOL_SIZE", Runtime.getRuntime().availableProcessors() * 2));
+
+        IgniteLogger log = logger(outputDir);
+
+        HadoopExternalCommunication comm = new HadoopExternalCommunication(
+            args.nodeId,
+            args.childProcId,
+            new JdkMarshaller(),
+            log,
+            msgExecSvc,
+            "external"
+        );
+
+        comm.start();
+
+        HadoopProcessDescriptor nodeDesc = new HadoopProcessDescriptor(args.nodeId, args.parentProcId);
+        nodeDesc.address(args.addr);
+        nodeDesc.tcpPort(args.tcpPort);
+        nodeDesc.sharedMemoryPort(args.shmemPort);
+
+        HadoopChildProcessRunner runner = new HadoopChildProcessRunner();
+
+        runner.start(comm, nodeDesc, msgExecSvc, log);
+
+        System.err.println("Started");
+        System.err.flush();
+
+        System.setOut(new PrintStream(out));
+        System.setErr(new PrintStream(err));
+    }
+
+    /**
+     * @param outputDir Directory for process output.
+     * @throws Exception If failed.
+     */
+    private void initializeStreams(File outputDir) throws Exception {
+        out = new FileOutputStream(new File(outputDir, args.childProcId + ".out"));
+        err = new FileOutputStream(new File(outputDir, args.childProcId + ".err"));
+    }
+
+    /**
+     * @return Path to output directory.
+     * @throws IOException If failed.
+     */
+    private File outputDirectory() throws IOException {
+        File f = new File(args.out);
+
+        if (!f.exists()) {
+            if (!f.mkdirs())
+                throw new IOException("Failed to create output directory: " + args.out);
+        }
+        else {
+            if (f.isFile())
+                throw new IOException("Output directory is a file: " + args.out);
+        }
+
+        return f;
+    }
+
+    /**
+     * @param outputDir Directory for process output.
+     * @return Logger.
+     */
+    private IgniteLogger logger(final File outputDir) {
+        final URL url = U.resolveIgniteUrl(DFLT_LOG4J_CONFIG);
+
+        Log4JLogger logger;
+
+        try {
+            logger = url != null ? new Log4JLogger(url) : new Log4JLogger(true);
+        }
+        catch (IgniteCheckedException e) {
+            System.err.println("Failed to create URL-based logger. Will use default one.");
+
+            e.printStackTrace();
+
+            logger = new Log4JLogger(true);
+        }
+
+        logger.updateFilePath(new IgniteClosure<String, String>() {
+            @Override public String apply(String s) {
+                return new File(outputDir, args.childProcId + ".log").getAbsolutePath();
+            }
+        });
+
+        return logger;
+    }
+
+    /**
+     * @param processArgs Process arguments.
+     * @return Parsed arguments.
+     * @throws Exception If a parameter value is missing or malformed.
+     */
+    private static Args arguments(String[] processArgs) throws Exception {
+        Args args = new Args();
+
+        for (int i = 0; i < processArgs.length; i++) {
+            String arg = processArgs[i];
+
+            switch (arg) {
+                case "-cpid": {
+                    if (i == processArgs.length - 1)
+                        throw new Exception("Missing process ID for '-cpid' parameter");
+
+                    String procIdStr = processArgs[++i];
+
+                    args.childProcId = UUID.fromString(procIdStr);
+
+                    break;
+                }
+
+                case "-ppid": {
+                    if (i == processArgs.length - 1)
+                        throw new Exception("Missing process ID for '-ppid' parameter");
+
+                    String procIdStr = processArgs[++i];
+
+                    args.parentProcId = UUID.fromString(procIdStr);
+
+                    break;
+                }
+
+                case "-nid": {
+                    if (i == processArgs.length - 1)
+                        throw new Exception("Missing node ID for '-nid' parameter");
+
+                    String nodeIdStr = processArgs[++i];
+
+                    args.nodeId = UUID.fromString(nodeIdStr);
+
+                    break;
+                }
+
+                case "-addr": {
+                    if (i == processArgs.length - 1)
+                        throw new Exception("Missing node address for '-addr' parameter");
+
+                    args.addr = processArgs[++i];
+
+                    break;
+                }
+
+                case "-tport": {
+                    if (i == processArgs.length - 1)
+                        throw new Exception("Missing tcp port for '-tport' parameter");
+
+                    args.tcpPort = Integer.parseInt(processArgs[++i]);
+
+                    break;
+                }
+
+                case "-sport": {
+                    if (i == processArgs.length - 1)
+                        throw new Exception("Missing shared memory port for '-sport' parameter");
+
+                    args.shmemPort = Integer.parseInt(processArgs[++i]);
+
+                    break;
+                }
+
+                case "-out": {
+                    if (i == processArgs.length - 1)
+                        throw new Exception("Missing output folder name for '-out' parameter");
+
+                    args.out = processArgs[++i];
+
+                    break;
+                }
+
+                case "-wd": {
+                    if (i == processArgs.length - 1)
+                        throw new Exception("Missing work folder name for '-wd' parameter");
+
+                    args.workDir = processArgs[++i];
+
+                    break;
+                }
+            }
+        }
+
+        return args;
+    }
+
+    /**
+     * Execution arguments.
+     */
+    private static class Args {
+        /** Child process ID. */
+        private UUID childProcId;
+
+        /** Parent process ID. */
+        private UUID parentProcId;
+
+        /** Node ID. */
+        private UUID nodeId;
+
+        /** Node address. */
+        private String addr;
+
+        /** TCP port. */
+        private int tcpPort;
+
+        /** Shmem port. */
+        private int shmemPort = -1;
+
+        /** Output folder. */
+        private String out;
+
+        /** Work directory. */
+        private String workDir;
+    }
+}
\ No newline at end of file
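
The arguments() parser above defines the starter's full command line. A
hypothetical invocation (all UUIDs, ports and paths below are placeholders):

    java -cp <classpath> \
        org.apache.ignite.internal.processors.hadoop.taskexecutor.external.child.HadoopExternalProcessStarter \
        -cpid 11111111-1111-1111-1111-111111111111 \
        -ppid 22222222-2222-2222-2222-222222222222 \
        -nid 33333333-3333-3333-3333-333333333333 \
        -addr 127.0.0.1 -tport 27100 -sport -1 \
        -out /tmp/proc-out -wd /tmp/proc-work

On success the process prints "Started" to stderr (which the parent presumably
watches) and then redirects System.out/System.err to <cpid>.out and <cpid>.err
in the output directory.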

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopAbstractCommunicationClient.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopAbstractCommunicationClient.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopAbstractCommunicationClient.java
new file mode 100644
index 0000000..ddf6a20
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopAbstractCommunicationClient.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication;
+
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+/**
+ * Implements basic lifecycle for communication clients.
+ */
+public abstract class HadoopAbstractCommunicationClient implements HadoopCommunicationClient {
+    /** Time when this client was last used. */
+    private volatile long lastUsed = U.currentTimeMillis();
+
+    /** Reservations. */
+    private final AtomicInteger reserves = new AtomicInteger();
+
+    /** {@inheritDoc} */
+    @Override public boolean close() {
+        return reserves.compareAndSet(0, -1);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void forceClose() {
+        reserves.set(-1);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean closed() {
+        return reserves.get() == -1;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean reserve() {
+        while (true) {
+            int r = reserves.get();
+
+            if (r == -1)
+                return false;
+
+            if (reserves.compareAndSet(r, r + 1))
+                return true;
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void release() {
+        while (true) {
+            int r = reserves.get();
+
+            if (r == -1)
+                return;
+
+            if (reserves.compareAndSet(r, r - 1))
+                return;
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean reserved() {
+        return reserves.get() > 0;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getIdleTime() {
+        return U.currentTimeMillis() - lastUsed;
+    }
+
+    /**
+     * Updates used time.
+     */
+    protected void markUsed() {
+        lastUsed = U.currentTimeMillis();
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopAbstractCommunicationClient.class, this);
+    }
+}
\ No newline at end of file
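
The reservation counter above encodes three states: -1 closed, 0 idle, and a
positive count of active users; close() succeeds only from the idle state, while
forceClose() is unconditional. A minimal caller-side sketch (client creation is
elided; c is any HadoopCommunicationClient):

    if (c.reserve()) {
        try {
            c.sendMessage(desc, msg); // Safe: close() cannot succeed while reserved.
        }
        finally {
            c.release();              // Back to idle once the send completes.
        }
    }
    else {
        // reserve() observed the -1 state: the client was closed concurrently,
        // so discard it and obtain a fresh one.
    }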

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopCommunicationClient.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopCommunicationClient.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopCommunicationClient.java
new file mode 100644
index 0000000..a325a3d
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopCommunicationClient.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;
+
+/**
+ * Communication client for exchanging messages with a Hadoop process.
+ */
+public interface HadoopCommunicationClient {
+    /**
+     * @return {@code True} if client has been closed by this call,
+     *      {@code false} if failed to close client (due to concurrent reservation or concurrent close).
+     */
+    public boolean close();
+
+    /**
+     * Forces client close.
+     */
+    public void forceClose();
+
+    /**
+     * @return {@code True} if client is closed.
+     */
+    public boolean closed();
+
+    /**
+     * @return {@code True} if client was reserved, {@code false} otherwise.
+     */
+    public boolean reserve();
+
+    /**
+     * Releases this client by decreasing reservations.
+     */
+    public void release();
+
+    /**
+     * @return {@code True} if client was reserved.
+     */
+    public boolean reserved();
+
+    /**
+     * Gets idle time of this client.
+     *
+     * @return Idle time of this client.
+     */
+    public long getIdleTime();
+
+    /**
+     * @param desc Process descriptor.
+     * @param msg Message to send.
+     * @throws IgniteCheckedException If failed.
+     */
+    public void sendMessage(HadoopProcessDescriptor desc, HadoopMessage msg) throws IgniteCheckedException;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopExternalCommunication.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopExternalCommunication.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopExternalCommunication.java
new file mode 100644
index 0000000..1d59a95
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopExternalCommunication.java
@@ -0,0 +1,1460 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.net.ConnectException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.SocketTimeoutException;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.channels.SocketChannel;
+import java.util.Collection;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;
+import org.apache.ignite.internal.util.GridConcurrentFactory;
+import org.apache.ignite.internal.util.GridKeyLock;
+import org.apache.ignite.internal.util.ipc.IpcEndpoint;
+import org.apache.ignite.internal.util.ipc.shmem.IpcOutOfSystemResourcesException;
+import org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryClientEndpoint;
+import org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint;
+import org.apache.ignite.internal.util.nio.GridBufferedParser;
+import org.apache.ignite.internal.util.nio.GridNioAsyncNotifyFilter;
+import org.apache.ignite.internal.util.nio.GridNioCodecFilter;
+import org.apache.ignite.internal.util.nio.GridNioFilter;
+import org.apache.ignite.internal.util.nio.GridNioFilterAdapter;
+import org.apache.ignite.internal.util.nio.GridNioFuture;
+import org.apache.ignite.internal.util.nio.GridNioMessageTracker;
+import org.apache.ignite.internal.util.nio.GridNioServer;
+import org.apache.ignite.internal.util.nio.GridNioServerListener;
+import org.apache.ignite.internal.util.nio.GridNioServerListenerAdapter;
+import org.apache.ignite.internal.util.nio.GridNioSession;
+import org.apache.ignite.internal.util.nio.GridNioSessionMetaKey;
+import org.apache.ignite.internal.util.typedef.CI1;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.LT;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.internal.util.worker.GridWorker;
+import org.apache.ignite.marshaller.Marshaller;
+import org.apache.ignite.thread.IgniteThread;
+import org.jetbrains.annotations.Nullable;
+import org.jsr166.ConcurrentLinkedDeque8;
+
+/**
+ * Hadoop external communication class.
+ */
+public class HadoopExternalCommunication {
+    /** IPC error message. */
+    public static final String OUT_OF_RESOURCES_TCP_MSG = "Failed to allocate shared memory segment " +
+        "(switching to TCP, may be slower).";
+
+    /** Default port which node sets listener to (value is <tt>27100</tt>). */
+    public static final int DFLT_PORT = 27100;
+
+    /** Default connection timeout (value is <tt>1000</tt>ms). */
+    public static final long DFLT_CONN_TIMEOUT = 1000;
+
+    /** Default maximum connection timeout (value is <tt>600,000</tt>ms). */
+    public static final long DFLT_MAX_CONN_TIMEOUT = 10 * 60 * 1000;
+
+    /** Default reconnect attempts count (value is <tt>10</tt>). */
+    public static final int DFLT_RECONNECT_CNT = 10;
+
+    /** Default message queue limit per connection (for incoming and outgoing messages). */
+    public static final int DFLT_MSG_QUEUE_LIMIT = GridNioServer.DFLT_SEND_QUEUE_LIMIT;
+
+    /**
+     * Default count of selectors for TCP server (value is <tt>1</tt>).
+     */
+    public static final int DFLT_SELECTORS_CNT = 1;
+
+    /** Node ID meta for session. */
+    private static final int PROCESS_META = GridNioSessionMetaKey.nextUniqueKey();
+
+    /** Handshake timeout meta for session. */
+    private static final int HANDSHAKE_FINISH_META = GridNioSessionMetaKey.nextUniqueKey();
+
+    /** Message tracker meta for session. */
+    private static final int TRACKER_META = GridNioSessionMetaKey.nextUniqueKey();
+
+    /**
+     * Default local port range (value is <tt>100</tt>).
+     * See {@link #setLocalPortRange(int)} for details.
+     */
+    public static final int DFLT_PORT_RANGE = 100;
+
+    /** Default value for {@code TCP_NODELAY} socket option (value is <tt>true</tt>). */
+    public static final boolean DFLT_TCP_NODELAY = true;
+
+    /** Server listener. */
+    private final GridNioServerListener<HadoopMessage> srvLsnr =
+        new GridNioServerListenerAdapter<HadoopMessage>() {
+            @Override public void onConnected(GridNioSession ses) {
+                HadoopProcessDescriptor desc = ses.meta(PROCESS_META);
+
+                assert desc != null : "Received connected notification without finished handshake: " + ses;
+            }
+
+            /** {@inheritDoc} */
+            @Override public void onDisconnected(GridNioSession ses, @Nullable Exception e) {
+                if (log.isDebugEnabled())
+                    log.debug("Closed connection for session: " + ses);
+
+                if (e != null)
+                    U.error(log, "Session disconnected due to exception: " + ses, e);
+
+                HadoopProcessDescriptor desc = ses.meta(PROCESS_META);
+
+                if (desc != null) {
+                    HadoopCommunicationClient rmv = clients.remove(desc.processId());
+
+                    if (rmv != null)
+                        rmv.forceClose();
+                }
+
+                HadoopMessageListener lsnr0 = lsnr;
+
+                if (lsnr0 != null)
+                    // Notify listener about connection close.
+                    lsnr0.onConnectionLost(desc);
+            }
+
+            /** {@inheritDoc} */
+            @Override public void onMessage(GridNioSession ses, HadoopMessage msg) {
+                notifyListener(ses.<HadoopProcessDescriptor>meta(PROCESS_META), msg);
+
+                if (msgQueueLimit > 0) {
+                    GridNioMessageTracker tracker = ses.meta(TRACKER_META);
+
+                    assert tracker != null : "Missing tracker for limited message queue: " + ses;
+
+                    tracker.run();
+                }
+            }
+        };
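+    // Note on the TRACKER_META handling above: when msgQueueLimit is positive, a
+    // GridNioMessageTracker is expected to be attached to the session elsewhere
+    // (presumably during handshake) and its run() is invoked once per processed
+    // message to implement per-connection backpressure.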
+
+    /** Logger. */
+    private IgniteLogger log;
+
+    /** Local process descriptor. */
+    private HadoopProcessDescriptor locProcDesc;
+
+    /** Marshaller. */
+    private Marshaller marsh;
+
+    /** Message notification executor service. */
+    private ExecutorService execSvc;
+
+    /** Grid name. */
+    private String gridName;
+
+    /** Local host address of this process. */
+    private volatile InetAddress locHost;
+
+    /** Local port which node uses. */
+    private int locPort = DFLT_PORT;
+
+    /** Local port range. */
+    private int locPortRange = DFLT_PORT_RANGE;
+
+    /** Local port which node uses to accept shared memory connections. */
+    private int shmemPort = -1;
+
+    /** Allocate direct buffer or heap buffer. */
+    private boolean directBuf = true;
+
+    /** Connect timeout. */
+    private long connTimeout = DFLT_CONN_TIMEOUT;
+
+    /** Maximum connect timeout. */
+    private long maxConnTimeout = DFLT_MAX_CONN_TIMEOUT;
+
+    /** Reconnect attempts count. */
+    @SuppressWarnings({"FieldAccessedSynchronizedAndUnsynchronized"})
+    private int reconCnt = DFLT_RECONNECT_CNT;
+
+    /** Socket send buffer. */
+    private int sockSndBuf;
+
+    /** Socket receive buffer. */
+    private int sockRcvBuf;
+
+    /** Message queue limit. */
+    private int msgQueueLimit = DFLT_MSG_QUEUE_LIMIT;
+
+    /** NIO server. */
+    private GridNioServer<HadoopMessage> nioSrvr;
+
+    /** Shared memory server. */
+    private IpcSharedMemoryServerEndpoint shmemSrv;
+
+    /** {@code TCP_NODELAY} option value for created sockets. */
+    private boolean tcpNoDelay = DFLT_TCP_NODELAY;
+
+    /** Shared memory accept worker. */
+    private ShmemAcceptWorker shmemAcceptWorker;
+
+    /** Shared memory workers. */
+    private final Collection<ShmemWorker> shmemWorkers = new ConcurrentLinkedDeque8<>();
+
+    /** Clients. */
+    private final ConcurrentMap<UUID, HadoopCommunicationClient> clients = GridConcurrentFactory.newMap();
+
+    /** Message listener. */
+    private volatile HadoopMessageListener lsnr;
+
+    /** Bound port. */
+    private int boundTcpPort = -1;
+
+    /** Bound port for shared memory server. */
+    private int boundTcpShmemPort = -1;
+
+    /** Count of selectors to use in TCP server. */
+    private int selectorsCnt = DFLT_SELECTORS_CNT;
+
+    /** Local node ID message. */
+    private ProcessHandshakeMessage locIdMsg;
+
+    /** Locks. */
+    private final GridKeyLock locks = new GridKeyLock();
+
+    /**
+     * @param parentNodeId Parent node ID.
+     * @param procId Process ID.
+     * @param marsh Marshaller to use.
+     * @param log Logger.
+     * @param execSvc Executor service for message notification.
+     * @param gridName Grid name.
+     */
+    public HadoopExternalCommunication(
+        UUID parentNodeId,
+        UUID procId,
+        Marshaller marsh,
+        IgniteLogger log,
+        ExecutorService execSvc,
+        String gridName
+    ) {
+        locProcDesc = new HadoopProcessDescriptor(parentNodeId, procId);
+
+        this.marsh = marsh;
+        this.log = log.getLogger(HadoopExternalCommunication.class);
+        this.execSvc = execSvc;
+        this.gridName = gridName;
+    }
+
+    /**
+     * Sets local port for socket binding.
+     * <p>
+     * If not provided, default value is {@link #DFLT_PORT}.
+     *
+     * @param locPort Port number.
+     */
+    public void setLocalPort(int locPort) {
+        this.locPort = locPort;
+    }
+
+    /**
+     * Gets local port for socket binding.
+     *
+     * @return Local port.
+     */
+    public int getLocalPort() {
+        return locPort;
+    }
+
+    /**
+     * Sets local port range for local host ports (value must be greater than or equal to <tt>0</tt>).
+     * If the provided local port (see {@link #setLocalPort(int)}) is occupied,
+     * the implementation will try to increment the port number for as long as it is less than
+     * the initial value plus this range (see the example after this method).
+     * <p>
+     * If port range value is <tt>0</tt>, then implementation will try bind only to the port provided by
+     * {@link #setLocalPort(int)} method and fail if binding to this port did not succeed.
+     * <p>
+     * Local port range is very useful during development when more than one grid nodes need to run
+     * on the same physical machine.
+     * <p>
+     * If not provided, default value is {@link #DFLT_PORT_RANGE}.
+     *
+     * @param locPortRange New local port range.
+     */
+    public void setLocalPortRange(int locPortRange) {
+        this.locPortRange = locPortRange;
+    }
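+
+    // Example (hypothetical values): with setLocalPort(27100) and setLocalPortRange(3),
+    // resetNioServer() below attempts to bind ports 27100, 27101 and 27102 in order,
+    // failing with the last bind exception if the whole range is busy.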
+
+    /**
+     * @return Local port range.
+     */
+    public int getLocalPortRange() {
+        return locPortRange;
+    }
+
+    /**
+     * Sets local port to accept shared memory connections.
+     * <p>
+     * If set to {@code -1} shared memory communication will be disabled.
+     * <p>
+     * If not provided, shared memory is disabled.
+     *
+     * @param shmemPort Port number.
+     */
+    public void setSharedMemoryPort(int shmemPort) {
+        this.shmemPort = shmemPort;
+    }
+
+    /**
+     * Gets shared memory port to accept incoming connections.
+     *
+     * @return Shared memory port.
+     */
+    public int getSharedMemoryPort() {
+        return shmemPort;
+    }
+
+    /**
+     * Sets connect timeout used when establishing connection
+     * with remote nodes.
+     * <p>
+     * {@code 0} is interpreted as infinite timeout.
+     * <p>
+     * If not provided, default value is {@link #DFLT_CONN_TIMEOUT}.
+     *
+     * @param connTimeout Connect timeout.
+     */
+    public void setConnectTimeout(long connTimeout) {
+        this.connTimeout = connTimeout;
+    }
+
+    /**
+     * @return Connection timeout.
+     */
+    public long getConnectTimeout() {
+        return connTimeout;
+    }
+
+    /**
+     * Sets maximum connect timeout. If handshake is not established within connect timeout,
+     * then SPI tries to repeat handshake procedure with increased connect timeout.
+     * The connect timeout can grow up to the maximum timeout value;
+     * once the maximum is reached, the handshake is considered failed
+     * (see the illustration after this method).
+     * <p>
+     * {@code 0} is interpreted as infinite timeout.
+     * <p>
+     * If not provided, default value is {@link #DFLT_MAX_CONN_TIMEOUT}.
+     *
+     * @param maxConnTimeout Maximum connect timeout.
+     */
+    public void setMaxConnectTimeout(long maxConnTimeout) {
+        this.maxConnTimeout = maxConnTimeout;
+    }
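+
+    // Illustration (the exact growth policy is applied by the handshake code and is
+    // an assumption here): with connTimeout = 1000 ms and maxConnTimeout = 600000 ms,
+    // each failed handshake is retried with an increased timeout until either the
+    // handshake succeeds or the 600000 ms cap is reached, at which point it fails.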
+
+    /**
+     * Gets maximum connection timeout.
+     *
+     * @return Maximum connection timeout.
+     */
+    public long getMaxConnectTimeout() {
+        return maxConnTimeout;
+    }
+
+    /**
+     * Sets maximum number of reconnect attempts used when establishing connection
+     * with remote nodes.
+     * <p>
+     * If not provided, default value is {@link #DFLT_RECONNECT_CNT}.
+     *
+     * @param reconCnt Maximum number of reconnection attempts.
+     */
+    public void setReconnectCount(int reconCnt) {
+        this.reconCnt = reconCnt;
+    }
+
+    /**
+     * @return Reconnect count.
+     */
+    public int getReconnectCount() {
+        return reconCnt;
+    }
+
+    /**
+     * Sets flag to allocate direct or heap buffer in SPI.
+     * If value is {@code true}, then SPI will use {@link ByteBuffer#allocateDirect(int)} call.
+     * Otherwise, SPI will use {@link ByteBuffer#allocate(int)} call.
+     * <p>
+     * If not provided, default value is {@code true}.
+     *
+     * @param directBuf Flag indicates to allocate direct or heap buffer in SPI.
+     */
+    public void setDirectBuffer(boolean directBuf) {
+        this.directBuf = directBuf;
+    }
+
+    /**
+     * @return Direct buffer flag.
+     */
+    public boolean isDirectBuffer() {
+        return directBuf;
+    }
+
+    /**
+     * Sets the count of selectors to be used in the TCP server.
+     * <p>
+     * If not provided, default value is {@link #DFLT_SELECTORS_CNT}.
+     *
+     * @param selectorsCnt Selectors count.
+     */
+    public void setSelectorsCount(int selectorsCnt) {
+        this.selectorsCnt = selectorsCnt;
+    }
+
+    /**
+     * @return Number of selectors to use.
+     */
+    public int getSelectorsCount() {
+        return selectorsCnt;
+    }
+
+    /**
+     * Sets value for {@code TCP_NODELAY} socket option. Each
+     * socket will be opened using provided value.
+     * <p>
+     * Setting this option to {@code true} disables Nagle's algorithm
+     * on the socket, decreasing latency and delivery time for small messages.
+     * <p>
+     * For systems that work under heavy network load it is advisable to
+     * set this value to {@code false}.
+     * <p>
+     * If not provided, default value is {@link #DFLT_TCP_NODELAY}.
+     *
+     * @param tcpNoDelay {@code True} to disable TCP delay.
+     */
+    public void setTcpNoDelay(boolean tcpNoDelay) {
+        this.tcpNoDelay = tcpNoDelay;
+    }
+
+    /**
+     * @return {@code TCP_NODELAY} flag.
+     */
+    public boolean isTcpNoDelay() {
+        return tcpNoDelay;
+    }
+
+    /**
+     * Sets receive buffer size for sockets created or accepted by this SPI.
+     * <p>
+     * If not provided, default is {@code 0} which leaves the buffer unchanged after
+     * socket creation (OS defaults).
+     *
+     * @param sockRcvBuf Socket receive buffer size.
+     */
+    public void setSocketReceiveBuffer(int sockRcvBuf) {
+        this.sockRcvBuf = sockRcvBuf;
+    }
+
+    /**
+     * @return Socket receive buffer size.
+     */
+    public int getSocketReceiveBuffer() {
+        return sockRcvBuf;
+    }
+
+    /**
+     * Sets send buffer size for sockets created or accepted by this SPI.
+     * <p>
+     * If not provided, default is {@code 0} which leaves the buffer unchanged
+     * after socket creation (OS defaults).
+     *
+     * @param sockSndBuf Socket send buffer size.
+     */
+    public void setSocketSendBuffer(int sockSndBuf) {
+        this.sockSndBuf = sockSndBuf;
+    }
+
+    /**
+     * @return Socket send buffer size.
+     */
+    public int getSocketSendBuffer() {
+        return sockSndBuf;
+    }
+
+    /**
+     * Sets message queue limit for incoming and outgoing messages.
+     * <p>
+     * When set to a positive number, the send queue is limited to the configured value.
+     * {@code 0} disables the size limitations.
+     * <p>
+     * If not provided, default is {@link #DFLT_MSG_QUEUE_LIMIT}.
+     *
+     * @param msgQueueLimit Send queue size limit.
+     */
+    public void setMessageQueueLimit(int msgQueueLimit) {
+        this.msgQueueLimit = msgQueueLimit;
+    }
+
+    /**
+     * @return Message queue size limit.
+     */
+    public int getMessageQueueLimit() {
+        return msgQueueLimit;
+    }
+
+    /**
+     * Sets Hadoop communication message listener.
+     *
+     * @param lsnr Message listener.
+     */
+    public void setListener(HadoopMessageListener lsnr) {
+        this.lsnr = lsnr;
+    }
+
+    /**
+     * @return Outbound message queue size.
+     */
+    public int getOutboundMessagesQueueSize() {
+        return nioSrvr.outboundMessagesQueueSize();
+    }
+
+    /**
+     * Starts communication.
+     *
+     * @throws IgniteCheckedException If failed.
+     */
+    public void start() throws IgniteCheckedException {
+        try {
+            locHost = U.getLocalHost();
+        }
+        catch (IOException e) {
+            throw new IgniteCheckedException("Failed to initialize local address.", e);
+        }
+
+        try {
+            shmemSrv = resetShmemServer();
+        }
+        catch (IgniteCheckedException e) {
+            U.warn(log, "Failed to start shared memory communication server.", e);
+        }
+
+        try {
+            // This method potentially resets local port to the value
+            // local node was bound to.
+            nioSrvr = resetNioServer();
+        }
+        catch (IgniteCheckedException e) {
+            throw new IgniteCheckedException("Failed to initialize TCP server: " + locHost, e);
+        }
+
+        locProcDesc.address(locHost.getHostAddress());
+        locProcDesc.sharedMemoryPort(boundTcpShmemPort);
+        locProcDesc.tcpPort(boundTcpPort);
+
+        locIdMsg = new ProcessHandshakeMessage(locProcDesc);
+
+        if (shmemSrv != null) {
+            shmemAcceptWorker = new ShmemAcceptWorker(shmemSrv);
+
+            new IgniteThread(shmemAcceptWorker).start();
+        }
+
+        nioSrvr.start();
+    }
+
+    /**
+     * Gets local process descriptor.
+     *
+     * @return Local process descriptor.
+     */
+    public HadoopProcessDescriptor localProcessDescriptor() {
+        return locProcDesc;
+    }
+
+    /**
+     * Gets filters used by communication.
+     *
+     * @return Filters array.
+     */
+    private GridNioFilter[] filters() {
+        return new GridNioFilter[] {
+            new GridNioAsyncNotifyFilter(gridName, execSvc, log),
+            new HandshakeAndBackpressureFilter(),
+            new HadoopMarshallerFilter(marsh),
+            new GridNioCodecFilter(new GridBufferedParser(directBuf, ByteOrder.nativeOrder()), log, false)
+        };
+    }
+
+    /**
+     * Recreates the TCP server socket instance.
+     *
+     * @return Server instance.
+     * @throws IgniteCheckedException Thrown if it's not possible to create server.
+     */
+    private GridNioServer<HadoopMessage> resetNioServer() throws IgniteCheckedException {
+        if (boundTcpPort >= 0)
+            throw new IgniteCheckedException("Tcp NIO server was already created on port " + boundTcpPort);
+
+        IgniteCheckedException lastEx = null;
+
+        // If configured TCP port is busy, find first available in range.
+        for (int port = locPort; port < locPort + locPortRange; port++) {
+            try {
+                GridNioServer<HadoopMessage> srvr =
+                    GridNioServer.<HadoopMessage>builder()
+                        .address(locHost)
+                        .port(port)
+                        .listener(srvLsnr)
+                        .logger(log.getLogger(GridNioServer.class))
+                        .selectorCount(selectorsCnt)
+                        .gridName(gridName)
+                        .tcpNoDelay(tcpNoDelay)
+                        .directBuffer(directBuf)
+                        .byteOrder(ByteOrder.nativeOrder())
+                        .socketSendBufferSize(sockSndBuf)
+                        .socketReceiveBufferSize(sockRcvBuf)
+                        .sendQueueLimit(msgQueueLimit)
+                        .directMode(false)
+                        .filters(filters())
+                        .build();
+
+                boundTcpPort = port;
+
+                // Ack the port the TCP server was bound to.
+                if (log.isInfoEnabled())
+                    log.info("Successfully bound to TCP port [port=" + boundTcpPort +
+                        ", locHost=" + locHost + ']');
+
+                return srvr;
+            }
+            catch (IgniteCheckedException e) {
+                lastEx = e;
+
+                if (log.isDebugEnabled())
+                    log.debug("Failed to bind to local port (will try next port within range) [port=" + port +
+                        ", locHost=" + locHost + ']');
+            }
+        }
+
+        // If free port wasn't found.
+        throw new IgniteCheckedException("Failed to bind to any port within range [startPort=" + locPort +
+            ", portRange=" + locPortRange + ", locHost=" + locHost + ']', lastEx);
+    }
+
+    /**
+     * Creates new shared memory communication server.
+     * @return Server.
+     * @throws IgniteCheckedException If failed.
+     */
+    @Nullable private IpcSharedMemoryServerEndpoint resetShmemServer() throws IgniteCheckedException {
+        if (boundTcpShmemPort >= 0)
+            throw new IgniteCheckedException("Shared memory server was already created on port " + boundTcpShmemPort);
+
+        if (shmemPort == -1 || U.isWindows())
+            return null;
+
+        IgniteCheckedException lastEx = null;
+
+        // If configured TCP port is busy, find first available in range.
+        for (int port = shmemPort; port < shmemPort + locPortRange; port++) {
+            try {
+                IpcSharedMemoryServerEndpoint srv = new IpcSharedMemoryServerEndpoint(
+                    log.getLogger(IpcSharedMemoryServerEndpoint.class),
+                    locProcDesc.processId(), gridName);
+
+                srv.setPort(port);
+
+                srv.omitOutOfResourcesWarning(true);
+
+                srv.start();
+
+                boundTcpShmemPort = port;
+
+                // Ack the port the shared memory server was bound to.
+                if (log.isInfoEnabled())
+                    log.info("Successfully bound shared memory communication to TCP port [port=" + boundTcpShmemPort +
+                        ", locHost=" + locHost + ']');
+
+                return srv;
+            }
+            catch (IgniteCheckedException e) {
+                lastEx = e;
+
+                if (log.isDebugEnabled())
+                    log.debug("Failed to bind to local port (will try next port within range) [port=" + port +
+                        ", locHost=" + locHost + ']');
+            }
+        }
+
+        // If free port wasn't found.
+        throw new IgniteCheckedException("Failed to bind shared memory communication to any port within range [startPort=" +
+            locPort + ", portRange=" + locPortRange + ", locHost=" + locHost + ']', lastEx);
+    }
+
+    /**
+     * Stops the server.
+     *
+     * @throws IgniteCheckedException If failed.
+     */
+    public void stop() throws IgniteCheckedException {
+        // Stop TCP server.
+        if (nioSrvr != null)
+            nioSrvr.stop();
+
+        U.cancel(shmemAcceptWorker);
+        U.join(shmemAcceptWorker, log);
+
+        U.cancel(shmemWorkers);
+        U.join(shmemWorkers, log);
+
+        shmemWorkers.clear();
+
+        // Force closing on stop (safety).
+        for (HadoopCommunicationClient client : clients.values())
+            client.forceClose();
+
+        // Clear resources.
+        nioSrvr = null;
+
+        boundTcpPort = -1;
+    }
+
+    /**
+     * Sends message to Hadoop process.
+     *
+     * @param desc Descriptor of the target Hadoop process.
+     * @param msg Message to send.
+     * @throws IgniteCheckedException If failed to send the message.
+     */
+    public void sendMessage(HadoopProcessDescriptor desc, HadoopMessage msg) throws
+        IgniteCheckedException {
+        assert desc != null;
+        assert msg != null;
+
+        if (log.isTraceEnabled())
+            log.trace("Sending message to Hadoop process [desc=" + desc + ", msg=" + msg + ']');
+
+        HadoopCommunicationClient client = null;
+
+        boolean closeOnRelease = true;
+
+        try {
+            client = reserveClient(desc);
+
+            client.sendMessage(desc, msg);
+
+            closeOnRelease = false;
+        }
+        finally {
+            if (client != null) {
+                if (closeOnRelease) {
+                    client.forceClose();
+
+                    clients.remove(desc.processId(), client);
+                }
+                else
+                    client.release();
+            }
+        }
+    }
+
+    /**
+     * Returns an existing client for the given process or creates a new one.
+     *
+     * @param desc Descriptor of the process to which a client should be opened.
+     * @return The existing or just created client.
+     * @throws IgniteCheckedException Thrown if any exception occurs.
+     */
+    private HadoopCommunicationClient reserveClient(HadoopProcessDescriptor desc) throws IgniteCheckedException {
+        assert desc != null;
+
+        UUID procId = desc.processId();
+
+        while (true) {
+            HadoopCommunicationClient client = clients.get(procId);
+
+            if (client == null) {
+                if (log.isDebugEnabled())
+                    log.debug("Did not find client for remote process [locProcDesc=" + locProcDesc + ", desc=" +
+                        desc + ']');
+
+                // Do not allow concurrent connects.
+                Object sync = locks.lock(procId);
+
+                try {
+                    client = clients.get(procId);
+
+                    if (client == null) {
+                        HadoopCommunicationClient old = clients.put(procId, client = createNioClient(desc));
+
+                        assert old == null;
+                    }
+                }
+                finally {
+                    locks.unlock(procId, sync);
+                }
+
+                assert client != null;
+            }
+
+            if (client.reserve())
+                return client;
+            else
+                // Client has just been closed by idle worker. Help it and try again.
+                clients.remove(procId, client);
+        }
+    }
+
+    /**
+     * @param desc Process descriptor.
+     * @return Client.
+     * @throws IgniteCheckedException If failed.
+     */
+    @Nullable protected HadoopCommunicationClient createNioClient(HadoopProcessDescriptor desc)
+        throws IgniteCheckedException {
+        assert desc != null;
+
+        int shmemPort = desc.sharedMemoryPort();
+
+        // If the remote process has a shared memory server enabled and belongs to the same
+        // parent node, we are likely on the same host and can try shared memory communication.
+        if (shmemPort != -1 && locProcDesc.parentNodeId().equals(desc.parentNodeId())) {
+            try {
+                return createShmemClient(desc, shmemPort);
+            }
+            catch (IgniteCheckedException e) {
+                if (e.hasCause(IpcOutOfSystemResourcesException.class))
+                    // Has cause or is itself the IpcOutOfSystemResourcesException.
+                    LT.warn(log, null, OUT_OF_RESOURCES_TCP_MSG);
+                else if (log.isDebugEnabled())
+                    log.debug("Failed to establish shared memory connection with local hadoop process: " +
+                        desc);
+            }
+        }
+
+        return createTcpClient(desc);
+    }
+
+    /**
+     * @param desc Process descriptor.
+     * @param port Port.
+     * @return Client.
+     * @throws IgniteCheckedException If failed.
+     */
+    @Nullable protected HadoopCommunicationClient createShmemClient(HadoopProcessDescriptor desc, int port)
+        throws IgniteCheckedException {
+        int attempt = 1;
+
+        int connectAttempts = 1;
+
+        long connTimeout0 = connTimeout;
+
+        while (true) {
+            IpcEndpoint clientEndpoint;
+
+            try {
+                clientEndpoint = new IpcSharedMemoryClientEndpoint(port, (int)connTimeout, log);
+            }
+            catch (IgniteCheckedException e) {
+                // Retry the connection once more if it could not be established.
+                if (connectAttempts < 2 && X.hasCause(e, ConnectException.class)) {
+                    connectAttempts++;
+
+                    continue;
+                }
+
+                throw e;
+            }
+
+            HadoopCommunicationClient client = null;
+
+            try {
+                ShmemWorker worker = new ShmemWorker(clientEndpoint, false);
+
+                shmemWorkers.add(worker);
+
+                GridNioSession ses = worker.session();
+
+                HandshakeFinish fin = new HandshakeFinish();
+
+                // The worker thread has not started yet, so it is safe to get the session and attach meta.
+                ses.addMeta(HANDSHAKE_FINISH_META, fin);
+
+                client = new HadoopTcpNioCommunicationClient(ses);
+
+                new IgniteThread(worker).start();
+
+                fin.await(connTimeout0);
+            }
+            catch (HadoopHandshakeTimeoutException e) {
+                if (log.isDebugEnabled())
+                    log.debug("Handshake timed out (will retry with increased timeout) [timeout=" + connTimeout0 +
+                        ", err=" + e.getMessage() + ", client=" + client + ']');
+
+                if (client != null)
+                    client.forceClose();
+
+                if (attempt == reconCnt || connTimeout0 > maxConnTimeout) {
+                    if (log.isDebugEnabled())
+                        log.debug("Handshake timedout (will stop attempts to perform the handshake) " +
+                            "[timeout=" + connTimeout0 + ", maxConnTimeout=" + maxConnTimeout +
+                            ", attempt=" + attempt + ", reconCnt=" + reconCnt +
+                            ", err=" + e.getMessage() + ", client=" + client + ']');
+
+                    throw e;
+                }
+                else {
+                    attempt++;
+
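+                    // Exponential backoff: double the handshake timeout before the next attempt.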
+                    connTimeout0 *= 2;
+
+                    continue;
+                }
+            }
+            catch (RuntimeException | Error e) {
+                if (log.isDebugEnabled())
+                    log.debug(
+                        "Caught exception (will close client) [err=" + e.getMessage() + ", client=" + client + ']');
+
+                if (client != null)
+                    client.forceClose();
+
+                throw e;
+            }
+
+            return client;
+        }
+    }
+
+    /**
+     * Establishes a TCP connection to the remote Hadoop process and returns a client.
+     *
+     * @param desc Process descriptor.
+     * @return Client.
+     * @throws IgniteCheckedException If failed.
+     */
+    protected HadoopCommunicationClient createTcpClient(HadoopProcessDescriptor desc) throws IgniteCheckedException {
+        String addr = desc.address();
+
+        int port = desc.tcpPort();
+
+        if (log.isDebugEnabled())
+            log.debug("Trying to connect to remote process [locProcDesc=" + locProcDesc + ", desc=" + desc + ']');
+
+        boolean conn = false;
+        HadoopTcpNioCommunicationClient client = null;
+        IgniteCheckedException errs = null;
+
+        int connectAttempts = 1;
+
+        long connTimeout0 = connTimeout;
+
+        int attempt = 1;
+
+        while (!conn) { // Reconnection on handshake timeout.
+            try {
+                SocketChannel ch = SocketChannel.open();
+
+                ch.configureBlocking(true);
+
+                ch.socket().setTcpNoDelay(tcpNoDelay);
+                ch.socket().setKeepAlive(true);
+
+                if (sockRcvBuf > 0)
+                    ch.socket().setReceiveBufferSize(sockRcvBuf);
+
+                if (sockSndBuf > 0)
+                    ch.socket().setSendBufferSize(sockSndBuf);
+
+                ch.socket().connect(new InetSocketAddress(addr, port), (int)connTimeout);
+
+                HandshakeFinish fin = new HandshakeFinish();
+
+                GridNioSession ses = nioSrvr.createSession(ch, F.asMap(HANDSHAKE_FINISH_META, fin)).get();
+
+                client = new HadoopTcpNioCommunicationClient(ses);
+
+                if (log.isDebugEnabled())
+                    log.debug("Waiting for handshake finish for client: " + client);
+
+                fin.await(connTimeout0);
+
+                conn = true;
+            }
+            catch (HadoopHandshakeTimeoutException e) {
+                if (client != null) {
+                    client.forceClose();
+
+                    client = null;
+                }
+
+                if (log.isDebugEnabled())
+                    log.debug(
+                        "Handshake timedout (will retry with increased timeout) [timeout=" + connTimeout0 +
+                            ", desc=" + desc + ", port=" + port + ", err=" + e + ']');
+
+                if (attempt == reconCnt || connTimeout0 > maxConnTimeout) {
+                    if (log.isDebugEnabled())
+                        log.debug("Handshake timed out (will stop attempts to perform the handshake) " +
+                            "[timeout=" + connTimeout0 + ", maxConnTimeout=" + maxConnTimeout +
+                            ", attempt=" + attempt + ", reconCnt=" + reconCnt +
+                            ", err=" + e.getMessage() + ", addr=" + addr + ']');
+
+                    if (errs == null)
+                        errs = new IgniteCheckedException("Failed to connect to remote Hadoop process " +
+                            "(is process still running?) [desc=" + desc + ", addrs=" + addr + ']');
+
+                    errs.addSuppressed(e);
+
+                    break;
+                }
+                else {
+                    attempt++;
+
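+                    // Exponential backoff: double the handshake timeout before the next attempt.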
+                    connTimeout0 *= 2;
+
+                    // Continue loop.
+                }
+            }
+            catch (Exception e) {
+                if (client != null) {
+                    client.forceClose();
+
+                    client = null;
+                }
+
+                if (log.isDebugEnabled())
+                    log.debug("Client creation failed [addr=" + addr + ", port=" + port +
+                        ", err=" + e + ']');
+
+                if (X.hasCause(e, SocketTimeoutException.class))
+                    LT.warn(log, null, "Connect timed out (consider increasing 'connTimeout' " +
+                        "configuration property) [addr=" + addr + ", port=" + port + ']');
+
+                if (errs == null)
+                    errs = new IgniteCheckedException("Failed to connect to remote Hadoop process (is process still running?) " +
+                        "[desc=" + desc + ", addrs=" + addr + ']');
+
+                errs.addSuppressed(e);
+
+                // Reconnect for the second time, if connection is not established.
+                if (connectAttempts < 2 &&
+                    (e instanceof ConnectException || X.hasCause(e, ConnectException.class))) {
+                    connectAttempts++;
+
+                    continue;
+                }
+
+                break;
+            }
+        }
+
+        if (client == null) {
+            assert errs != null;
+
+            if (X.hasCause(errs, ConnectException.class))
+                LT.warn(log, null, "Failed to connect to a remote Hadoop process (is process still running?). " +
+                    "Make sure operating system firewall is disabled on local and remote host) " +
+                    "[addrs=" + addr + ", port=" + port + ']');
+
+            throw errs;
+        }
+
+        if (log.isDebugEnabled())
+            log.debug("Created client: " + client);
+
+        return client;
+    }
+
+    /**
+     * @param desc Sender process descriptor.
+     * @param msg Communication message.
+     */
+    protected void notifyListener(HadoopProcessDescriptor desc, HadoopMessage msg) {
+        HadoopMessageListener lsnr = this.lsnr;
+
+        if (lsnr != null)
+            // Notify listener of a new message.
+            lsnr.onMessageReceived(desc, msg);
+        else if (log.isDebugEnabled())
+            log.debug("Received communication message without any registered listeners (will ignore) " +
+                "[senderProcDesc=" + desc + ", msg=" + msg + ']');
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopExternalCommunication.class, this);
+    }
+
+    /**
+     * This worker is responsible for shutting the server down on stop;
+     * no other thread shall stop the passed-in server.
+     */
+    private class ShmemAcceptWorker extends GridWorker {
+        /** */
+        private final IpcSharedMemoryServerEndpoint srv;
+
+        /**
+         * @param srv Server.
+         */
+        ShmemAcceptWorker(IpcSharedMemoryServerEndpoint srv) {
+            super(gridName, "shmem-communication-acceptor", HadoopExternalCommunication.this.log);
+
+            this.srv = srv;
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void body() throws InterruptedException {
+            try {
+                while (!Thread.interrupted()) {
+                    ShmemWorker e = new ShmemWorker(srv.accept(), true);
+
+                    shmemWorkers.add(e);
+
+                    new IgniteThread(e).start();
+                }
+            }
+            catch (IgniteCheckedException e) {
+                if (!isCancelled())
+                    U.error(log, "Shmem server failed.", e);
+            }
+            finally {
+                srv.close();
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public void cancel() {
+            super.cancel();
+
+            srv.close();
+        }
+    }
+
+    /**
+     * Worker that serves a single shared memory IPC endpoint through {@link HadoopIpcToNioAdapter}.
+     */
+    private class ShmemWorker extends GridWorker {
+        /** */
+        private final IpcEndpoint endpoint;
+
+        /** Adapter. */
+        private final HadoopIpcToNioAdapter<HadoopMessage> adapter;
+
+        /**
+         * @param endpoint Endpoint.
+         * @param accepted {@code True} if the endpoint was accepted by the server.
+         */
+        private ShmemWorker(IpcEndpoint endpoint, boolean accepted) {
+            super(gridName, "shmem-worker", HadoopExternalCommunication.this.log);
+
+            this.endpoint = endpoint;
+
+            adapter = new HadoopIpcToNioAdapter<>(
+                HadoopExternalCommunication.this.log,
+                endpoint,
+                accepted,
+                srvLsnr,
+                filters());
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void body() throws InterruptedException {
+            try {
+                adapter.serve();
+            }
+            finally {
+                shmemWorkers.remove(this);
+
+                endpoint.close();
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public void cancel() {
+            super.cancel();
+
+            endpoint.close();
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void cleanup() {
+            super.cleanup();
+
+            endpoint.close();
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return S.toString(ShmemWorker.class, this);
+        }
+
+        /**
+         * @return NIO session for this worker.
+         */
+        public GridNioSession session() {
+            return adapter.session();
+        }
+    }
+
+    /**
+     * Handshake completion latch: the connecting side awaits while the filter finishes it.
+     */
+    private static class HandshakeFinish {
+        /** Await latch. */
+        private CountDownLatch latch = new CountDownLatch(1);
+
+        /**
+         * Finishes handshake.
+         */
+        public void finish() {
+            latch.countDown();
+        }
+
+        /**
+         * @param time Time to wait, in milliseconds.
+         * @throws HadoopHandshakeTimeoutException If failed to wait.
+         */
+        public void await(long time) throws HadoopHandshakeTimeoutException {
+            try {
+                if (!latch.await(time, TimeUnit.MILLISECONDS))
+                    throw new HadoopHandshakeTimeoutException("Failed to wait for handshake to finish [timeout=" +
+                        time + ']');
+            }
+            catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+
+                throw new HadoopHandshakeTimeoutException("Failed to wait for handshake to finish (thread was " +
+                    "interrupted) [timeout=" + time + ']', e);
+            }
+        }
+    }
+
+    /**
+     * Filter that performs the initial handshake and applies message queue backpressure.
+     */
+    private class HandshakeAndBackpressureFilter extends GridNioFilterAdapter {
+        /**
+         * Assigns filter name to a filter.
+         */
+        protected HandshakeAndBackpressureFilter() {
+            super("HadoopHandshakeFilter");
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onSessionOpened(final GridNioSession ses) throws IgniteCheckedException {
+            if (ses.accepted()) {
+                if (log.isDebugEnabled())
+                    log.debug("Accepted connection, initiating handshake: " + ses);
+
+                // Server initiates handshake.
+                ses.send(locIdMsg).listen(new CI1<IgniteInternalFuture<?>>() {
+                    @Override public void apply(IgniteInternalFuture<?> fut) {
+                        try {
+                            // Make sure there were no errors.
+                            fut.get();
+                        }
+                        catch (IgniteCheckedException e) {
+                            log.warning("Failed to send handshake message, will close session: " + ses, e);
+
+                            ses.close();
+                        }
+                    }
+                });
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onSessionClosed(GridNioSession ses) throws IgniteCheckedException {
+            proceedSessionClosed(ses);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onExceptionCaught(GridNioSession ses, IgniteCheckedException ex) throws IgniteCheckedException {
+            proceedExceptionCaught(ses, ex);
+        }
+
+        /** {@inheritDoc} */
+        @Override public GridNioFuture<?> onSessionWrite(GridNioSession ses, Object msg) throws IgniteCheckedException {
+            if (ses.meta(PROCESS_META) == null && !(msg instanceof ProcessHandshakeMessage))
+                log.warning("Writing message before handshake has finished [ses=" + ses + ", msg=" + msg + ']');
+
+            return proceedSessionWrite(ses, msg);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onMessageReceived(GridNioSession ses, Object msg) throws IgniteCheckedException {
+            HadoopProcessDescriptor desc = ses.meta(PROCESS_META);
+
+            UUID rmtProcId = desc == null ? null : desc.processId();
+
+            if (rmtProcId == null) {
+                if (!(msg instanceof ProcessHandshakeMessage)) {
+                    log.warning("Invalid handshake message received, will close connection [ses=" + ses +
+                        ", msg=" + msg + ']');
+
+                    ses.close();
+
+                    return;
+                }
+
+                ProcessHandshakeMessage nId = (ProcessHandshakeMessage)msg;
+
+                if (log.isDebugEnabled())
+                    log.debug("Received handshake message [ses=" + ses + ", msg=" + msg + ']');
+
+                ses.addMeta(PROCESS_META, nId.processDescriptor());
+
+                if (!ses.accepted())
+                    // Send handshake reply.
+                    ses.send(locIdMsg);
+                else {
+                    // Accepted (server-side) session: the client's handshake message completes the handshake.
+                    rmtProcId = nId.processDescriptor().processId();
+
+                    if (log.isDebugEnabled())
+                        log.debug("Finished handshake with remote client: " + ses);
+
+                    Object sync = locks.tryLock(rmtProcId);
+
+                    if (sync != null) {
+                        try {
+                            if (clients.get(rmtProcId) == null) {
+                                if (log.isDebugEnabled())
+                                    log.debug("Will reuse session for descriptor: " + rmtProcId);
+
+                                // Handshake finished flag is true.
+                                clients.put(rmtProcId, new HadoopTcpNioCommunicationClient(ses));
+                            }
+                            else {
+                                if (log.isDebugEnabled())
+                                    log.debug("Will not reuse client as another already exists [locProcDesc=" +
+                                        locProcDesc + ", desc=" + desc + ']');
+                            }
+                        }
+                        finally {
+                            locks.unlock(rmtProcId, sync);
+                        }
+                    }
+                    else {
+                        if (log.isDebugEnabled())
+                            log.debug("Concurrent connection is being established, will not reuse client session [" +
+                                "locProcDesc=" + locProcDesc + ", desc=" + desc + ']');
+                    }
+                }
+
+                if (log.isDebugEnabled())
+                    log.debug("Handshake is finished for session [ses=" + ses + ", locProcDesc=" + locProcDesc + ']');
+
+                HandshakeFinish to = ses.meta(HANDSHAKE_FINISH_META);
+
+                if (to != null)
+                    to.finish();
+
+                // Notify session opened (both parties).
+                proceedSessionOpened(ses);
+            }
+            else {
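+                // Backpressure: when a message queue limit is configured, count unprocessed messages per session.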
+                if (msgQueueLimit > 0) {
+                    GridNioMessageTracker tracker = ses.meta(TRACKER_META);
+
+                    if (tracker == null) {
+                        GridNioMessageTracker old = ses.addMeta(TRACKER_META, tracker =
+                            new GridNioMessageTracker(ses, msgQueueLimit));
+
+                        assert old == null;
+                    }
+
+                    tracker.onMessageReceived();
+                }
+
+                proceedMessageReceived(ses, msg);
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public GridNioFuture<Boolean> onSessionClose(GridNioSession ses) throws IgniteCheckedException {
+            return proceedSessionClose(ses);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onSessionIdleTimeout(GridNioSession ses) throws IgniteCheckedException {
+            proceedSessionIdleTimeout(ses);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onSessionWriteTimeout(GridNioSession ses) throws IgniteCheckedException {
+            proceedSessionWriteTimeout(ses);
+        }
+    }
+
+    /**
+     * Process ID message.
+     */
+    @SuppressWarnings("PublicInnerClass")
+    public static class ProcessHandshakeMessage implements HadoopMessage {
+        /** */
+        private static final long serialVersionUID = 0L;
+
+        /** Process descriptor. */
+        private HadoopProcessDescriptor procDesc;
+
+        /** Required by {@link java.io.Externalizable}. */
+        public ProcessHandshakeMessage() {
+            // No-op.
+        }
+
+        /**
+         * @param procDesc Process descriptor.
+         */
+        private ProcessHandshakeMessage(HadoopProcessDescriptor procDesc) {
+            this.procDesc = procDesc;
+        }
+
+        /**
+         * @return Process descriptor.
+         */
+        public HadoopProcessDescriptor processDescriptor() {
+            return procDesc;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void writeExternal(ObjectOutput out) throws IOException {
+            out.writeObject(procDesc);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+            procDesc = (HadoopProcessDescriptor)in.readObject();
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return S.toString(ProcessHandshakeMessage.class, this);
+        }
+    }
+}
\ No newline at end of file
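
Both createShmemClient() and createTcpClient() above retry the handshake with an
exponentially growing timeout until either the attempt budget (reconCnt) or the
timeout ceiling (maxConnTimeout) is exhausted. A minimal standalone sketch of that
retry pattern; tryHandshake() and the class name are hypothetical stand-ins, not
part of this module:

    import java.util.concurrent.TimeoutException;

    public class HandshakeBackoffSketch {
        /** Hypothetical handshake stub; a real one would block for up to timeoutMs. */
        static void tryHandshake(long timeoutMs) throws TimeoutException {
            throw new TimeoutException("Handshake timed out after " + timeoutMs + " ms.");
        }

        /** Retries the handshake, doubling the timeout after each failed attempt. */
        static void handshakeWithBackoff(long connTimeout, long maxConnTimeout, int reconCnt)
            throws TimeoutException {
            long timeout0 = connTimeout;

            for (int attempt = 1; ; attempt++) {
                try {
                    tryHandshake(timeout0);

                    return; // Handshake succeeded.
                }
                catch (TimeoutException e) {
                    // Give up once the attempt budget or timeout ceiling is exhausted.
                    if (attempt == reconCnt || timeout0 > maxConnTimeout)
                        throw e;

                    timeout0 *= 2; // Exponential backoff, mirroring connTimeout0 *= 2 above.
                }
            }
        }

        public static void main(String[] args) {
            try {
                handshakeWithBackoff(500, 4000, 10);
            }
            catch (TimeoutException e) {
                System.out.println("Gave up: " + e.getMessage());
            }
        }
    }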

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopHandshakeTimeoutException.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopHandshakeTimeoutException.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopHandshakeTimeoutException.java
new file mode 100644
index 0000000..b2a85e1
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopHandshakeTimeoutException.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.jetbrains.annotations.Nullable;
+
+/** Internal exception class for proper timeout handling. */
+class HadoopHandshakeTimeoutException extends IgniteCheckedException {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /**
+     * @param msg Message.
+     */
+    HadoopHandshakeTimeoutException(String msg) {
+        super(msg);
+    }
+
+    /**
+     * @param msg Message.
+     * @param cause Cause.
+     */
+    HadoopHandshakeTimeoutException(String msg, @Nullable Throwable cause) {
+        super(msg, cause);
+    }
+}
\ No newline at end of file
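
HadoopHandshakeTimeoutException is what HandshakeFinish.await() throws when the
handshake latch is not released in time. The underlying mechanism is a plain
CountDownLatch wait with a timeout; a self-contained sketch of the same pattern
using only java.util.concurrent (the class name is hypothetical):

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    public class LatchHandshakeSketch {
        public static void main(String[] args) throws Exception {
            final CountDownLatch handshakeFinished = new CountDownLatch(1);

            // Simulated peer: releases the latch after a delay, like HandshakeFinish.finish().
            new Thread(new Runnable() {
                @Override public void run() {
                    try {
                        Thread.sleep(100);
                    }
                    catch (InterruptedException ignored) {
                        Thread.currentThread().interrupt();
                    }

                    handshakeFinished.countDown();
                }
            }).start();

            // Like HandshakeFinish.await(time): fail if the handshake does not
            // finish within the allotted time.
            if (!handshakeFinished.await(1000, TimeUnit.MILLISECONDS))
                throw new TimeoutException("Failed to wait for handshake to finish.");

            System.out.println("Handshake finished.");
        }
    }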

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopIpcToNioAdapter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopIpcToNioAdapter.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopIpcToNioAdapter.java
new file mode 100644
index 0000000..a8de999
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopIpcToNioAdapter.java
@@ -0,0 +1,248 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReference;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.internal.util.ipc.IpcEndpoint;
+import org.apache.ignite.internal.util.nio.GridNioFilter;
+import org.apache.ignite.internal.util.nio.GridNioFilterAdapter;
+import org.apache.ignite.internal.util.nio.GridNioFilterChain;
+import org.apache.ignite.internal.util.nio.GridNioFinishedFuture;
+import org.apache.ignite.internal.util.nio.GridNioFuture;
+import org.apache.ignite.internal.util.nio.GridNioServerListener;
+import org.apache.ignite.internal.util.nio.GridNioSession;
+import org.apache.ignite.internal.util.nio.GridNioSessionImpl;
+
+/**
+ * Allows re-using existing {@link GridNioFilter}s for IPC (specifically shared memory IPC)
+ * communications.
+ *
+ * Note that this class consumes an entire thread inside the {@link #serve()} method
+ * in order to serve one {@link org.apache.ignite.internal.util.ipc.IpcEndpoint}.
+ */
+public class HadoopIpcToNioAdapter<T> {
+    /** */
+    private final IpcEndpoint endp;
+
+    /** */
+    private final GridNioFilterChain<T> chain;
+
+    /** */
+    private final GridNioSessionImpl ses;
+
+    /** */
+    private final AtomicReference<CountDownLatch> latchRef = new AtomicReference<>();
+
+    /** */
+    private final ByteBuffer writeBuf;
+
+    /**
+     * @param log Log.
+     * @param endp Endpoint.
+     * @param accepted {@code True} if the endpoint was accepted (server side of the connection).
+     * @param lsnr Listener.
+     * @param filters Filters.
+     */
+    public HadoopIpcToNioAdapter(IgniteLogger log, IpcEndpoint endp, boolean accepted,
+        GridNioServerListener<T> lsnr, GridNioFilter... filters) {
+        this.endp = endp;
+
+        chain = new GridNioFilterChain<>(log, lsnr, new HeadFilter(), filters);
+        ses = new GridNioSessionImpl(chain, null, null, accepted);
+
+        writeBuf = ByteBuffer.allocate(8 << 10);
+
+        writeBuf.order(ByteOrder.nativeOrder());
+    }
+
+    /**
+     * Serves the configured listener chain by repeatedly reading data from the endpoint.
+     *
+     * @throws InterruptedException If interrupted.
+     */
+    public void serve() throws InterruptedException {
+        try {
+            chain.onSessionOpened(ses);
+
+            InputStream in = endp.inputStream();
+
+            ByteBuffer readBuf = ByteBuffer.allocate(8 << 10);
+
+            readBuf.order(ByteOrder.nativeOrder());
+
+            assert readBuf.hasArray();
+
+            while (!Thread.interrupted()) {
+                int pos = readBuf.position();
+
+                int read = in.read(readBuf.array(), pos, readBuf.remaining());
+
+                if (read > 0) {
+                    readBuf.position(0);
+                    readBuf.limit(pos + read);
+
+                    chain.onMessageReceived(ses, readBuf);
+
+                    if (readBuf.hasRemaining())
+                        readBuf.compact();
+                    else
+                        readBuf.clear();
+
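+                    // Backpressure: if reads were paused via onPauseReads(), block until resumed.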
+                    CountDownLatch latch = latchRef.get();
+
+                    if (latch != null)
+                        latch.await();
+                }
+                else if (read < 0) {
+                    endp.close();
+
+                    break; // And close below.
+                }
+            }
+
+            // Assuming remote end closed connection - pushing event from head to tail.
+            chain.onSessionClosed(ses);
+        }
+        catch (Exception e) {
+            chain.onExceptionCaught(ses, new IgniteCheckedException("Failed to read from IPC endpoint.", e));
+        }
+    }
+
+    /**
+     * Gets dummy session for this adapter.
+     *
+     * @return Session.
+     */
+    public GridNioSession session() {
+        return ses;
+    }
+
+    /**
+     * Handles write events on chain.
+     *
+     * @param msg Buffer to send.
+     * @return Send result.
+     */
+    private GridNioFuture<?> send(ByteBuffer msg) {
+        assert writeBuf.hasArray();
+
+        try {
+            while (msg.hasRemaining()) {
+                writeBuf.clear();
+
+                // Copy at most capacity() bytes per pass to avoid overflowing the fixed-size buffer.
+                int oldLimit = msg.limit();
+
+                msg.limit(Math.min(oldLimit, msg.position() + writeBuf.capacity()));
+
+                writeBuf.put(msg);
+
+                msg.limit(oldLimit);
+
+                endp.outputStream().write(writeBuf.array(), 0, writeBuf.position());
+            }
+        }
+        catch (IOException | IgniteCheckedException e) {
+            return new GridNioFinishedFuture<Object>(e);
+        }
+
+        return new GridNioFinishedFuture<>((Object)null);
+    }
+
+    /**
+     * Filter forwarding messages from the chain's head to this adapter.
+     */
+    private class HeadFilter extends GridNioFilterAdapter {
+        /**
+         * Assigns filter name.
+         */
+        protected HeadFilter() {
+            super("HeadFilter");
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onSessionOpened(GridNioSession ses) throws IgniteCheckedException {
+            proceedSessionOpened(ses);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onSessionClosed(GridNioSession ses) throws IgniteCheckedException {
+            proceedSessionClosed(ses);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onExceptionCaught(GridNioSession ses, IgniteCheckedException ex) throws IgniteCheckedException {
+            proceedExceptionCaught(ses, ex);
+        }
+
+        /** {@inheritDoc} */
+        @Override public GridNioFuture<?> onSessionWrite(GridNioSession ses, Object msg) {
+            assert ses == HadoopIpcToNioAdapter.this.ses : "ses=" + ses +
+                ", this.ses=" + HadoopIpcToNioAdapter.this.ses;
+
+            return send((ByteBuffer)msg);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onMessageReceived(GridNioSession ses, Object msg) throws IgniteCheckedException {
+            proceedMessageReceived(ses, msg);
+        }
+
+        /** {@inheritDoc} */
+        @Override public GridNioFuture<?> onPauseReads(GridNioSession ses) throws IgniteCheckedException {
+            // This call should be synced externally to avoid races.
+            boolean b = latchRef.compareAndSet(null, new CountDownLatch(1));
+
+            assert b;
+
+            return new GridNioFinishedFuture<>(b);
+        }
+
+        /** {@inheritDoc} */
+        @Override public GridNioFuture<?> onResumeReads(GridNioSession ses) throws IgniteCheckedException {
+            // This call should be synced externally to avoid races.
+            CountDownLatch latch = latchRef.getAndSet(null);
+
+            if (latch != null)
+                latch.countDown();
+
+            return new GridNioFinishedFuture<Object>(latch != null);
+        }
+
+        /** {@inheritDoc} */
+        @Override public GridNioFuture<Boolean> onSessionClose(GridNioSession ses) {
+            assert ses == HadoopIpcToNioAdapter.this.ses;
+
+            boolean closed = HadoopIpcToNioAdapter.this.ses.setClosed();
+
+            if (closed)
+                endp.close();
+
+            return new GridNioFinishedFuture<>(closed);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onSessionIdleTimeout(GridNioSession ses) throws IgniteCheckedException {
+            proceedSessionIdleTimeout(ses);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onSessionWriteTimeout(GridNioSession ses) throws IgniteCheckedException {
+            proceedSessionWriteTimeout(ses);
+        }
+    }
+}
\ No newline at end of file
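
The heart of serve() above is the loop that pumps bytes from the endpoint's
InputStream into a ByteBuffer, hands the buffer to the filter chain, and then
compacts any leftover bytes of a partially received message (or clears the buffer
when everything was consumed). A self-contained sketch of that buffer management;
the character-printing consumer stands in for chain.onMessageReceived():

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.ByteBuffer;

    public class ReadLoopSketch {
        public static void main(String[] args) throws IOException {
            InputStream in = new ByteArrayInputStream("hello, shared memory".getBytes("UTF-8"));

            ByteBuffer readBuf = ByteBuffer.allocate(8); // Small buffer to force several reads.

            while (true) {
                int pos = readBuf.position();

                int read = in.read(readBuf.array(), pos, readBuf.remaining());

                if (read < 0)
                    break; // End of stream: the remote end closed the connection.

                // Manual flip: readable data now spans [0, pos + read).
                readBuf.position(0);
                readBuf.limit(pos + read);

                // Consume as much as possible (stand-in for chain.onMessageReceived()).
                while (readBuf.hasRemaining())
                    System.out.print((char)readBuf.get());

                // Keep an unconsumed tail for the next read, or reset the buffer.
                if (readBuf.hasRemaining())
                    readBuf.compact();
                else
                    readBuf.clear();
            }
        }
    }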

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMarshallerFilter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMarshallerFilter.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMarshallerFilter.java
new file mode 100644
index 0000000..3f79469
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMarshallerFilter.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
+import org.apache.ignite.internal.util.nio.GridNioFilterAdapter;
+import org.apache.ignite.internal.util.nio.GridNioFuture;
+import org.apache.ignite.internal.util.nio.GridNioSession;
+import org.apache.ignite.marshaller.Marshaller;
+
+/**
+ * Serialization filter.
+ */
+public class HadoopMarshallerFilter extends GridNioFilterAdapter {
+    /** Marshaller. */
+    private final Marshaller marshaller;
+
+    /**
+     * @param marshaller Marshaller to use.
+     */
+    public HadoopMarshallerFilter(Marshaller marshaller) {
+        super("HadoopMarshallerFilter");
+
+        this.marshaller = marshaller;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onSessionOpened(GridNioSession ses) throws IgniteCheckedException {
+        proceedSessionOpened(ses);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onSessionClosed(GridNioSession ses) throws IgniteCheckedException {
+        proceedSessionClosed(ses);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onExceptionCaught(GridNioSession ses, IgniteCheckedException ex) throws IgniteCheckedException {
+        proceedExceptionCaught(ses, ex);
+    }
+
+    /** {@inheritDoc} */
+    @Override public GridNioFuture<?> onSessionWrite(GridNioSession ses, Object msg) throws IgniteCheckedException {
+        assert msg instanceof HadoopMessage : "Invalid message type: " + msg;
+
+        return proceedSessionWrite(ses, marshaller.marshal(msg));
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onMessageReceived(GridNioSession ses, Object msg) throws IgniteCheckedException {
+        assert msg instanceof byte[];
+
+        // Always unmarshal with system classloader.
+        proceedMessageReceived(ses, marshaller.unmarshal((byte[])msg, null));
+    }
+
+    /** {@inheritDoc} */
+    @Override public GridNioFuture<Boolean> onSessionClose(GridNioSession ses) throws IgniteCheckedException {
+        return proceedSessionClose(ses);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onSessionIdleTimeout(GridNioSession ses) throws IgniteCheckedException {
+        proceedSessionIdleTimeout(ses);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onSessionWriteTimeout(GridNioSession ses) throws IgniteCheckedException {
+        proceedSessionWriteTimeout(ses);
+    }
+}
\ No newline at end of file
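
HadoopMarshallerFilter keeps the rest of the NIO pipeline byte-oriented: outbound
HadoopMessage instances are marshalled to byte[] in onSessionWrite(), and inbound
byte[] payloads are unmarshalled back into messages in onMessageReceived(). A
round-trip sketch of the same idea, using plain JDK serialization as a stand-in
for Ignite's Marshaller:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;

    public class MarshalRoundTripSketch {
        public static void main(String[] args) throws Exception {
            String msg = "job-status-update"; // Stand-in for a HadoopMessage.

            // Outbound path (cf. onSessionWrite): object -> byte[].
            ByteArrayOutputStream bout = new ByteArrayOutputStream();

            ObjectOutputStream out = new ObjectOutputStream(bout);

            out.writeObject(msg);
            out.close();

            byte[] marshalled = bout.toByteArray();

            // Inbound path (cf. onMessageReceived): byte[] -> object.
            ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(marshalled));

            System.out.println("Unmarshalled: " + in.readObject());
        }
    }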

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMessageListener.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMessageListener.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMessageListener.java
new file mode 100644
index 0000000..6d50f43
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMessageListener.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication;
+
+import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;
+
+/**
+ * Hadoop communication message listener.
+ */
+public interface HadoopMessageListener {
+    /**
+     * @param desc Process descriptor.
+     * @param msg Hadoop message.
+     */
+    public void onMessageReceived(HadoopProcessDescriptor desc, HadoopMessage msg);
+
+    /**
+     * Called when the connection to a remote process is lost.
+     *
+     * @param desc Process descriptor.
+     */
+    public void onConnectionLost(HadoopProcessDescriptor desc);
+}
\ No newline at end of file
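
Implementing the listener only requires the two callbacks. A minimal sketch that
logs to System.out; how the listener gets registered is an assumption here (the
lsnr field read in notifyListener() suggests a setter on HadoopExternalCommunication,
which this diff does not show):

    import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
    import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;

    /** Minimal listener sketch: reacts to received messages and lost connections. */
    public class LoggingMessageListener implements HadoopMessageListener {
        /** {@inheritDoc} */
        @Override public void onMessageReceived(HadoopProcessDescriptor desc, HadoopMessage msg) {
            System.out.println("Message from " + desc.processId() + ": " + msg);
        }

        /** {@inheritDoc} */
        @Override public void onConnectionLost(HadoopProcessDescriptor desc) {
            System.out.println("Connection lost: " + desc.processId());
        }
    }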


[44/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobTracker.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobTracker.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobTracker.java
new file mode 100644
index 0000000..f3e17f3
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobTracker.java
@@ -0,0 +1,1706 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.jobtracker;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicInteger;
+import javax.cache.event.CacheEntryEvent;
+import javax.cache.event.CacheEntryUpdatedListener;
+import javax.cache.expiry.Duration;
+import javax.cache.expiry.ExpiryPolicy;
+import javax.cache.expiry.ModifiedExpiryPolicy;
+import javax.cache.processor.EntryProcessor;
+import javax.cache.processor.MutableEntry;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.events.DiscoveryEvent;
+import org.apache.ignite.events.Event;
+import org.apache.ignite.events.EventType;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener;
+import org.apache.ignite.internal.processors.cache.GridCacheAdapter;
+import org.apache.ignite.internal.processors.cache.IgniteInternalCache;
+import org.apache.ignite.internal.processors.hadoop.HadoopClassLoader;
+import org.apache.ignite.internal.processors.hadoop.HadoopComponent;
+import org.apache.ignite.internal.processors.hadoop.HadoopContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
+import org.apache.ignite.internal.processors.hadoop.HadoopJob;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobPhase;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobStatus;
+import org.apache.ignite.internal.processors.hadoop.HadoopMapReducePlan;
+import org.apache.ignite.internal.processors.hadoop.HadoopMapReducePlanner;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounterWriter;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCountersImpl;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopPerformanceCounter;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskStatus;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;
+import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2Job;
+import org.apache.ignite.internal.util.GridMutex;
+import org.apache.ignite.internal.util.GridSpinReadWriteLock;
+import org.apache.ignite.internal.util.future.GridFinishedFuture;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.apache.ignite.internal.util.typedef.CI1;
+import org.apache.ignite.internal.util.typedef.CIX1;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.internal.CU;
+import org.apache.ignite.internal.util.typedef.internal.SB;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteInClosure;
+import org.jetbrains.annotations.Nullable;
+import org.jsr166.ConcurrentHashMap8;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.ignite.internal.processors.hadoop.HadoopJobPhase.PHASE_CANCELLING;
+import static org.apache.ignite.internal.processors.hadoop.HadoopJobPhase.PHASE_COMPLETE;
+import static org.apache.ignite.internal.processors.hadoop.HadoopJobPhase.PHASE_MAP;
+import static org.apache.ignite.internal.processors.hadoop.HadoopJobPhase.PHASE_REDUCE;
+import static org.apache.ignite.internal.processors.hadoop.HadoopJobPhase.PHASE_SETUP;
+import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.ABORT;
+import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.COMMIT;
+import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.MAP;
+import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.REDUCE;
+import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.SETUP;
+import static org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskState.COMPLETED;
+import static org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskState.CRASHED;
+import static org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskState.FAILED;
+import static org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskState.RUNNING;
+
+/**
+ * Hadoop job tracker.
+ */
+public class HadoopJobTracker extends HadoopComponent {
+    /** */
+    private final GridMutex mux = new GridMutex();
+
+    /** */
+    private volatile IgniteInternalCache<HadoopJobId, HadoopJobMetadata> jobMetaPrj;
+
+    /** Projection with expiry policy for finished job updates. */
+    private volatile IgniteInternalCache<HadoopJobId, HadoopJobMetadata> finishedJobMetaPrj;
+
+    /** Map-reduce execution planner. */
+    @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
+    private HadoopMapReducePlanner mrPlanner;
+
+    /** All the known jobs. */
+    private final ConcurrentMap<HadoopJobId, GridFutureAdapter<HadoopJob>> jobs = new ConcurrentHashMap8<>();
+
+    /** Locally active jobs. */
+    private final ConcurrentMap<HadoopJobId, JobLocalState> activeJobs = new ConcurrentHashMap8<>();
+
+    /** Locally requested finish futures. */
+    private final ConcurrentMap<HadoopJobId, GridFutureAdapter<HadoopJobId>> activeFinishFuts =
+        new ConcurrentHashMap8<>();
+
+    /** Event processing service. */
+    private ExecutorService evtProcSvc;
+
+    /** Component busy lock. */
+    private GridSpinReadWriteLock busyLock;
+
+    /** Class to create HadoopJob instances from. */
+    private Class<? extends HadoopJob> jobCls;
+
+    /** Closure to check result of async transform of system cache. */
+    private final IgniteInClosure<IgniteInternalFuture<?>> failsLog = new CI1<IgniteInternalFuture<?>>() {
+        @Override public void apply(IgniteInternalFuture<?> gridFut) {
+            try {
+                gridFut.get();
+            }
+            catch (IgniteCheckedException e) {
+                U.error(log, "Failed to transform system cache.", e);
+            }
+        }
+    };
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("unchecked")
+    @Override public void start(final HadoopContext ctx) throws IgniteCheckedException {
+        super.start(ctx);
+
+        busyLock = new GridSpinReadWriteLock();
+
+        evtProcSvc = Executors.newFixedThreadPool(1);
+
+        UUID nodeId = ctx.localNodeId();
+
+        assert jobCls == null;
+
+        String[] libNames = null;
+
+        if (ctx.configuration() != null)
+            libNames = ctx.configuration().getNativeLibraryNames();
+
+        HadoopClassLoader ldr = new HadoopClassLoader(null, HadoopClassLoader.nameForJob(nodeId), libNames);
+
+        try {
+            jobCls = (Class<HadoopV2Job>)ldr.loadClass(HadoopV2Job.class.getName());
+        }
+        catch (Exception e) {
+            throw new IgniteCheckedException("Failed to load job class [class="
+                + HadoopV2Job.class.getName() + ']', e);
+        }
+    }
+
+    /**
+     * @return Job meta projection.
+     */
+    @SuppressWarnings("NonPrivateFieldAccessedInSynchronizedContext")
+    private IgniteInternalCache<HadoopJobId, HadoopJobMetadata> jobMetaCache() {
+        IgniteInternalCache<HadoopJobId, HadoopJobMetadata> prj = jobMetaPrj;
+
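+        // Lazy initialization: double-checked locking on mux ensures the projection is created once.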
+        if (prj == null) {
+            synchronized (mux) {
+                if ((prj = jobMetaPrj) == null) {
+                    GridCacheAdapter<HadoopJobId, HadoopJobMetadata> sysCache = ctx.kernalContext().cache()
+                        .internalCache(CU.SYS_CACHE_HADOOP_MR);
+
+                    assert sysCache != null;
+
+                    mrPlanner = ctx.planner();
+
+                    try {
+                        ctx.kernalContext().resource().injectGeneric(mrPlanner);
+                    }
+                    catch (IgniteCheckedException e) { // Must not happen.
+                        U.error(log, "Failed to inject resources.", e);
+
+                        throw new IllegalStateException(e);
+                    }
+
+                    jobMetaPrj = prj = sysCache;
+
+                    if (ctx.configuration().getFinishedJobInfoTtl() > 0) {
+                        ExpiryPolicy finishedJobPlc = new ModifiedExpiryPolicy(
+                            new Duration(MILLISECONDS, ctx.configuration().getFinishedJobInfoTtl()));
+
+                        finishedJobMetaPrj = prj.withExpiryPolicy(finishedJobPlc);
+                    }
+                    else
+                        finishedJobMetaPrj = jobMetaPrj;
+                }
+            }
+        }
+
+        return prj;
+    }
+
+    /**
+     * @return Projection with expiry policy for finished job updates.
+     */
+    private IgniteInternalCache<HadoopJobId, HadoopJobMetadata> finishedJobMetaCache() {
+        IgniteInternalCache<HadoopJobId, HadoopJobMetadata> prj = finishedJobMetaPrj;
+
+        if (prj == null) {
+            jobMetaCache();
+
+            prj = finishedJobMetaPrj;
+
+            assert prj != null;
+        }
+
+        return prj;
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("deprecation")
+    @Override public void onKernalStart() throws IgniteCheckedException {
+        super.onKernalStart();
+
+        jobMetaCache().context().continuousQueries().executeInternalQuery(
+            new CacheEntryUpdatedListener<HadoopJobId, HadoopJobMetadata>() {
+                @Override public void onUpdated(final Iterable<CacheEntryEvent<? extends HadoopJobId,
+                    ? extends HadoopJobMetadata>> evts) {
+                    if (!busyLock.tryReadLock())
+                        return;
+
+                    try {
+                        // Must process query callback in a separate thread to avoid deadlocks.
+                        evtProcSvc.submit(new EventHandler() {
+                            @Override protected void body() throws IgniteCheckedException {
+                                processJobMetadataUpdates(evts);
+                            }
+                        });
+                    }
+                    finally {
+                        busyLock.readUnlock();
+                    }
+                }
+            },
+            null,
+            true,
+            true,
+            false
+        );
+
+        ctx.kernalContext().event().addLocalEventListener(new GridLocalEventListener() {
+            @Override public void onEvent(final Event evt) {
+                if (!busyLock.tryReadLock())
+                    return;
+
+                try {
+                    // Must process discovery callback in a separate thread to avoid deadlock.
+                    evtProcSvc.submit(new EventHandler() {
+                        @Override protected void body() {
+                            processNodeLeft((DiscoveryEvent)evt);
+                        }
+                    });
+                }
+                finally {
+                    busyLock.readUnlock();
+                }
+            }
+        }, EventType.EVT_NODE_FAILED, EventType.EVT_NODE_LEFT);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onKernalStop(boolean cancel) {
+        super.onKernalStop(cancel);
+
+        busyLock.writeLock();
+
+        evtProcSvc.shutdown();
+
+        // Fail all pending futures.
+        for (GridFutureAdapter<HadoopJobId> fut : activeFinishFuts.values())
+            fut.onDone(new IgniteCheckedException("Failed to execute Hadoop map-reduce job (grid is stopping)."));
+    }
+
+    /**
+     * Submits execution of Hadoop job to grid.
+     *
+     * @param jobId Job ID.
+     * @param info Job info.
+     * @return Job completion future.
+     */
+    @SuppressWarnings("unchecked")
+    public IgniteInternalFuture<HadoopJobId> submit(HadoopJobId jobId, HadoopJobInfo info) {
+        if (!busyLock.tryReadLock()) {
+            return new GridFinishedFuture<>(new IgniteCheckedException("Failed to execute map-reduce job " +
+                "(grid is stopping): " + info));
+        }
+
+        try {
+            long jobPrepare = U.currentTimeMillis();
+
+            if (jobs.containsKey(jobId) || jobMetaCache().containsKey(jobId))
+                throw new IgniteCheckedException("Failed to submit job. Job with the same ID already exists: " + jobId);
+
+            HadoopJob job = job(jobId, info);
+
+            HadoopMapReducePlan mrPlan = mrPlanner.preparePlan(job, ctx.nodes(), null);
+
+            HadoopJobMetadata meta = new HadoopJobMetadata(ctx.localNodeId(), jobId, info);
+
+            meta.mapReducePlan(mrPlan);
+
+            meta.pendingSplits(allSplits(mrPlan));
+            meta.pendingReducers(allReducers(mrPlan));
+
+            GridFutureAdapter<HadoopJobId> completeFut = new GridFutureAdapter<>();
+
+            GridFutureAdapter<HadoopJobId> old = activeFinishFuts.put(jobId, completeFut);
+
+            assert old == null : "Duplicate completion future [jobId=" + jobId + ", old=" + old + ']';
+
+            if (log.isDebugEnabled())
+                log.debug("Submitting job metadata [jobId=" + jobId + ", meta=" + meta + ']');
+
+            long jobStart = U.currentTimeMillis();
+
+            HadoopPerformanceCounter perfCntr = HadoopPerformanceCounter.getCounter(meta.counters(),
+                ctx.localNodeId());
+
+            perfCntr.clientSubmissionEvents(info);
+            perfCntr.onJobPrepare(jobPrepare);
+            perfCntr.onJobStart(jobStart);
+
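+            // Re-check through the distributed cache: another node may have submitted the same job ID concurrently.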
+            if (jobMetaCache().getAndPutIfAbsent(jobId, meta) != null)
+                throw new IgniteCheckedException("Failed to submit job. Job with the same ID already exists: " + jobId);
+
+            return completeFut;
+        }
+        catch (IgniteCheckedException e) {
+            U.error(log, "Failed to submit job: " + jobId, e);
+
+            return new GridFinishedFuture<>(e);
+        }
+        finally {
+            busyLock.readUnlock();
+        }
+    }
+
+    /**
+     * Convert Hadoop job metadata to job status.
+     *
+     * @param meta Metadata.
+     * @return Status.
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public static HadoopJobStatus status(HadoopJobMetadata meta) {
+        HadoopJobInfo jobInfo = meta.jobInfo();
+
+        return new HadoopJobStatus(
+            meta.jobId(),
+            jobInfo.jobName(),
+            jobInfo.user(),
+            meta.pendingSplits() != null ? meta.pendingSplits().size() : 0,
+            meta.pendingReducers() != null ? meta.pendingReducers().size() : 0,
+            meta.mapReducePlan().mappers(),
+            meta.mapReducePlan().reducers(),
+            meta.phase(),
+            meta.failCause() != null,
+            meta.version()
+        );
+    }
+
+    /**
+     * Gets hadoop job status for given job ID.
+     *
+     * @param jobId Job ID to get status for.
+     * @return Job status for given job ID or {@code null} if job was not found.
+     */
+    @Nullable public HadoopJobStatus status(HadoopJobId jobId) throws IgniteCheckedException {
+        if (!busyLock.tryReadLock())
+            return null; // Grid is stopping.
+
+        try {
+            HadoopJobMetadata meta = jobMetaCache().get(jobId);
+
+            return meta != null ? status(meta) : null;
+        }
+        finally {
+            busyLock.readUnlock();
+        }
+    }
+
+    /**
+     * Gets job finish future.
+     *
+     * @param jobId Job ID.
+     * @return Finish future or {@code null}.
+     * @throws IgniteCheckedException If failed.
+     */
+    @Nullable public IgniteInternalFuture<?> finishFuture(HadoopJobId jobId) throws IgniteCheckedException {
+        if (!busyLock.tryReadLock())
+            return null; // Grid is stopping.
+
+        try {
+            HadoopJobMetadata meta = jobMetaCache().get(jobId);
+
+            if (meta == null)
+                return null;
+
+            if (log.isTraceEnabled())
+                log.trace("Got job metadata for status check [locNodeId=" + ctx.localNodeId() + ", meta=" + meta + ']');
+
+            if (meta.phase() == PHASE_COMPLETE) {
+                if (log.isTraceEnabled())
+                    log.trace("Job is complete, returning finished future: " + jobId);
+
+                return new GridFinishedFuture<>(jobId);
+            }
+
+            GridFutureAdapter<HadoopJobId> fut = F.addIfAbsent(activeFinishFuts, jobId,
+                new GridFutureAdapter<HadoopJobId>());
+
+            // Get meta from cache one more time to close the window.
+            meta = jobMetaCache().get(jobId);
+
+            if (log.isTraceEnabled())
+                log.trace("Re-checking job metadata [locNodeId=" + ctx.localNodeId() + ", meta=" + meta + ']');
+
+            if (meta == null) {
+                fut.onDone();
+
+                activeFinishFuts.remove(jobId, fut);
+            }
+            else if (meta.phase() == PHASE_COMPLETE) {
+                fut.onDone(jobId, meta.failCause());
+
+                activeFinishFuts.remove(jobId, fut);
+            }
+
+            return fut;
+        }
+        finally {
+            busyLock.readUnlock();
+        }
+    }
+
+    /**
+     * Gets job plan by job ID.
+     *
+     * @param jobId Job ID.
+     * @return Job plan or {@code null} if the job was not found or the grid is stopping.
+     * @throws IgniteCheckedException If failed.
+     */
+    public HadoopMapReducePlan plan(HadoopJobId jobId) throws IgniteCheckedException {
+        if (!busyLock.tryReadLock())
+            return null;
+
+        try {
+            HadoopJobMetadata meta = jobMetaCache().get(jobId);
+
+            if (meta != null)
+                return meta.mapReducePlan();
+
+            return null;
+        }
+        finally {
+            busyLock.readUnlock();
+        }
+    }
+
+    /**
+     * Callback from the task executor invoked when a task has finished.
+     *
+     * @param info Task info.
+     * @param status Task status.
+     */
+    @SuppressWarnings({"ConstantConditions", "ThrowableResultOfMethodCallIgnored"})
+    public void onTaskFinished(HadoopTaskInfo info, HadoopTaskStatus status) {
+        if (!busyLock.tryReadLock())
+            return;
+
+        try {
+            assert status.state() != RUNNING;
+
+            if (log.isDebugEnabled())
+                log.debug("Received task finished callback [info=" + info + ", status=" + status + ']');
+
+            JobLocalState state = activeJobs.get(info.jobId());
+
+            // A CRASHED task may carry a null fail cause, but a FAILED task must not.
+            assert (status.state() != FAILED) || status.failCause() != null :
+                "Invalid task status [info=" + info + ", status=" + status + ']';
+
+            assert state != null || (ctx.jobUpdateLeader() && (info.type() == COMMIT || info.type() == ABORT)) :
+                "Missing local state for finished task [info=" + info + ", status=" + status + ']';
+
+            StackedProcessor incrCntrs = null;
+
+            if (status.state() == COMPLETED)
+                incrCntrs = new IncrementCountersProcessor(null, status.counters());
+
+            switch (info.type()) {
+                case SETUP: {
+                    state.onSetupFinished(info, status, incrCntrs);
+
+                    break;
+                }
+
+                case MAP: {
+                    state.onMapFinished(info, status, incrCntrs);
+
+                    break;
+                }
+
+                case REDUCE: {
+                    state.onReduceFinished(info, status, incrCntrs);
+
+                    break;
+                }
+
+                case COMBINE: {
+                    state.onCombineFinished(info, status, incrCntrs);
+
+                    break;
+                }
+
+                case COMMIT:
+                case ABORT: {
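+                    // COMMIT/ABORT may run on the job update leader without local job state,
+                    // so the final phase is written through the finished-job projection directly.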
+                    IgniteInternalCache<HadoopJobId, HadoopJobMetadata> cache = finishedJobMetaCache();
+
+                    cache.invokeAsync(info.jobId(), new UpdatePhaseProcessor(incrCntrs, PHASE_COMPLETE)).
+                        listen(failsLog);
+
+                    break;
+                }
+            }
+        }
+        finally {
+            busyLock.readUnlock();
+        }
+    }
+
+    /**
+     * @param jobId Job ID.
+     * @param c Entry processor to apply to the job metadata.
+     */
+    private void transform(HadoopJobId jobId, EntryProcessor<HadoopJobId, HadoopJobMetadata, Void> c) {
+        jobMetaCache().invokeAsync(jobId, c).listen(failsLog);
+    }
+
+    /**
+     * Callback from the task executor invoked when a process is ready to receive shuffle messages.
+     *
+     * @param jobId Job ID.
+     * @param reducers Reducers.
+     * @param desc Process descriptor.
+     */
+    public void onExternalMappersInitialized(HadoopJobId jobId, Collection<Integer> reducers,
+        HadoopProcessDescriptor desc) {
+        transform(jobId, new InitializeReducersProcessor(null, reducers, desc));
+    }
+
+    /**
+     * Gets all input splits for given hadoop map-reduce plan.
+     *
+     * @param plan Map-reduce plan.
+     * @return Collection of all input splits that should be processed.
+     */
+    @SuppressWarnings("ConstantConditions")
+    private Map<HadoopInputSplit, Integer> allSplits(HadoopMapReducePlan plan) {
+        Map<HadoopInputSplit, Integer> res = new HashMap<>();
+
+        int taskNum = 0;
+
+        for (UUID nodeId : plan.mapperNodeIds()) {
+            for (HadoopInputSplit split : plan.mappers(nodeId)) {
+                if (res.put(split, taskNum++) != null)
+                    throw new IllegalStateException("Duplicate split: " + split);
+            }
+        }
+
+        return res;
+    }
+
+    /**
+     * Gets all reducers for this job.
+     *
+     * @param plan Map-reduce plan.
+     * @return Collection of reducers.
+     */
+    private Collection<Integer> allReducers(HadoopMapReducePlan plan) {
+        Collection<Integer> res = new HashSet<>();
+
+        for (int i = 0; i < plan.reducers(); i++)
+            res.add(i);
+
+        return res;
+    }
+
+    /**
+     * Processes node leave (or fail) event.
+     *
+     * @param evt Discovery event.
+     */
+    @SuppressWarnings("ConstantConditions")
+    private void processNodeLeft(DiscoveryEvent evt) {
+        if (log.isDebugEnabled())
+            log.debug("Processing discovery event [locNodeId=" + ctx.localNodeId() + ", evt=" + evt + ']');
+
+        // Check only if this node is responsible for job status updates.
+        if (ctx.jobUpdateLeader()) {
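+            // Setup failover only matters if the failed node preceded this one in topology order:
+            // it may have been the previous job update leader that owned the setup task.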
+            boolean checkSetup = evt.eventNode().order() < ctx.localNodeOrder();
+
+            // Iteration over all local entries is correct since system cache is REPLICATED.
+            for (Object metaObj : jobMetaCache().values()) {
+                HadoopJobMetadata meta = (HadoopJobMetadata)metaObj;
+
+                HadoopJobId jobId = meta.jobId();
+
+                HadoopMapReducePlan plan = meta.mapReducePlan();
+
+                HadoopJobPhase phase = meta.phase();
+
+                try {
+                    if (checkSetup && phase == PHASE_SETUP && !activeJobs.containsKey(jobId)) {
+                        // Failover setup task.
+                        HadoopJob job = job(jobId, meta.jobInfo());
+
+                        Collection<HadoopTaskInfo> setupTask = setupTask(jobId);
+
+                        assert setupTask != null;
+
+                        ctx.taskExecutor().run(job, setupTask);
+                    }
+                    else if (phase == PHASE_MAP || phase == PHASE_REDUCE) {
+                        // Check all plan nodes, not only the event node, since
+                        // multiple nodes may have failed.
+                        Collection<HadoopInputSplit> cancelSplits = null;
+
+                        for (UUID nodeId : plan.mapperNodeIds()) {
+                            if (ctx.kernalContext().discovery().node(nodeId) == null) {
+                                // Node has left the grid.
+                                Collection<HadoopInputSplit> mappers = plan.mappers(nodeId);
+
+                                if (cancelSplits == null)
+                                    cancelSplits = new HashSet<>();
+
+                                cancelSplits.addAll(mappers);
+                            }
+                        }
+
+                        Collection<Integer> cancelReducers = null;
+
+                        for (UUID nodeId : plan.reducerNodeIds()) {
+                            if (ctx.kernalContext().discovery().node(nodeId) == null) {
+                                // Node has left the grid.
+                                int[] reducers = plan.reducers(nodeId);
+
+                                if (cancelReducers == null)
+                                    cancelReducers = new HashSet<>();
+
+                                for (int rdc : reducers)
+                                    cancelReducers.add(rdc);
+                            }
+                        }
+
+                        if (cancelSplits != null || cancelReducers != null)
+                            jobMetaCache().invoke(meta.jobId(), new CancelJobProcessor(null, new IgniteCheckedException(
+                                "One or more nodes participating in map-reduce job execution failed."), cancelSplits,
+                                cancelReducers));
+                    }
+                }
+                catch (IgniteCheckedException e) {
+                    U.error(log, "Failed to cancel job: " + meta, e);
+                }
+            }
+        }
+    }
+
+    /**
+     * @param updated Updated cache entries.
+     * @throws IgniteCheckedException If failed.
+     */
+    private void processJobMetadataUpdates(
+        Iterable<CacheEntryEvent<? extends HadoopJobId, ? extends HadoopJobMetadata>> updated)
+        throws IgniteCheckedException {
+        UUID locNodeId = ctx.localNodeId();
+
+        for (CacheEntryEvent<? extends HadoopJobId, ? extends HadoopJobMetadata> entry : updated) {
+            HadoopJobId jobId = entry.getKey();
+            HadoopJobMetadata meta = entry.getValue();
+
+            if (meta == null || !ctx.isParticipating(meta))
+                continue;
+
+            if (log.isDebugEnabled())
+                log.debug("Processing job metadata update callback [locNodeId=" + locNodeId +
+                    ", meta=" + meta + ']');
+
+            try {
+                ctx.taskExecutor().onJobStateChanged(meta);
+            }
+            catch (IgniteCheckedException e) {
+                U.error(log, "Failed to process job state changed callback (will fail the job) " +
+                    "[locNodeId=" + locNodeId + ", jobId=" + jobId + ", meta=" + meta + ']', e);
+
+                transform(jobId, new CancelJobProcessor(null, e));
+
+                continue;
+            }
+
+            processJobMetaUpdate(jobId, meta, locNodeId);
+        }
+    }
+
+    /**
+     * @param jobId Job ID.
+     * @param plan Map-reduce plan.
+     */
+    @SuppressWarnings({"unused", "ConstantConditions" })
+    private void printPlan(HadoopJobId jobId, HadoopMapReducePlan plan) {
+        log.info("Plan for " + jobId);
+
+        SB b = new SB();
+
+        b.a("   Map: ");
+
+        for (UUID nodeId : plan.mapperNodeIds())
+            b.a(nodeId).a("=").a(plan.mappers(nodeId).size()).a(' ');
+
+        log.info(b.toString());
+
+        b = new SB();
+
+        b.a("   Reduce: ");
+
+        for (UUID nodeId : plan.reducerNodeIds())
+            b.a(nodeId).a("=").a(Arrays.toString(plan.reducers(nodeId))).a(' ');
+
+        log.info(b.toString());
+    }
+
+    /**
+     * @param jobId Job ID.
+     * @param meta Job metadata.
+     * @param locNodeId Local node ID.
+     * @throws IgniteCheckedException If failed.
+     */
+    private void processJobMetaUpdate(HadoopJobId jobId, HadoopJobMetadata meta, UUID locNodeId)
+        throws IgniteCheckedException {
+        JobLocalState state = activeJobs.get(jobId);
+
+        HadoopJob job = job(jobId, meta.jobInfo());
+
+        HadoopMapReducePlan plan = meta.mapReducePlan();
+
+        switch (meta.phase()) {
+            case PHASE_SETUP: {
+                if (ctx.jobUpdateLeader()) {
+                    Collection<HadoopTaskInfo> setupTask = setupTask(jobId);
+
+                    if (setupTask != null)
+                        ctx.taskExecutor().run(job, setupTask);
+                }
+
+                break;
+            }
+
+            case PHASE_MAP: {
+                // Check if we should initiate new task on local node.
+                Collection<HadoopTaskInfo> tasks = mapperTasks(plan.mappers(locNodeId), meta);
+
+                if (tasks != null)
+                    ctx.taskExecutor().run(job, tasks);
+
+                break;
+            }
+
+            case PHASE_REDUCE: {
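+                // All reducers are done: the job update leader submits the final COMMIT task.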
+                if (meta.pendingReducers().isEmpty() && ctx.jobUpdateLeader()) {
+                    HadoopTaskInfo info = new HadoopTaskInfo(COMMIT, jobId, 0, 0, null);
+
+                    if (log.isDebugEnabled())
+                        log.debug("Submitting COMMIT task for execution [locNodeId=" + locNodeId +
+                                ", jobId=" + jobId + ']');
+
+                    ctx.taskExecutor().run(job, Collections.singletonList(info));
+
+                    break;
+                }
+
+                Collection<HadoopTaskInfo> tasks = reducerTasks(plan.reducers(locNodeId), job);
+
+                if (tasks != null)
+                    ctx.taskExecutor().run(job, tasks);
+
+                break;
+            }
+
+            case PHASE_CANCELLING: {
+                // Prevent notifying the task executor more than once.
+                if (state != null && state.onCancel()) {
+                    if (log.isDebugEnabled())
+                        log.debug("Cancelling local task execution for job: " + meta);
+
+                    ctx.taskExecutor().cancelTasks(jobId);
+                }
+
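+                // Once no pending mappers or reducers remain, the update leader runs the final ABORT task.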
+                if (meta.pendingSplits().isEmpty() && meta.pendingReducers().isEmpty()) {
+                    if (ctx.jobUpdateLeader()) {
+                        if (state == null)
+                            state = initState(jobId);
+
+                        // Prevent running multiple abort tasks.
+                        if (state.onAborted()) {
+                            HadoopTaskInfo info = new HadoopTaskInfo(ABORT, jobId, 0, 0, null);
+
+                            if (log.isDebugEnabled())
+                                log.debug("Submitting ABORT task for execution [locNodeId=" + locNodeId +
+                                        ", jobId=" + jobId + ']');
+
+                            ctx.taskExecutor().run(job, Collections.singletonList(info));
+                        }
+                    }
+
+                    break;
+                }
+                else {
+                    // Check if there are unscheduled mappers or reducers.
+                    Collection<HadoopInputSplit> cancelMappers = new ArrayList<>();
+                    Collection<Integer> cancelReducers = new ArrayList<>();
+
+                    Collection<HadoopInputSplit> mappers = plan.mappers(ctx.localNodeId());
+
+                    if (mappers != null) {
+                        for (HadoopInputSplit b : mappers) {
+                            if (state == null || !state.mapperScheduled(b))
+                                cancelMappers.add(b);
+                        }
+                    }
+
+                    int[] rdc = plan.reducers(ctx.localNodeId());
+
+                    if (rdc != null) {
+                        for (int r : rdc) {
+                            if (state == null || !state.reducerScheduled(r))
+                                cancelReducers.add(r);
+                        }
+                    }
+
+                    if (!cancelMappers.isEmpty() || !cancelReducers.isEmpty())
+                        transform(jobId, new CancelJobProcessor(null, cancelMappers, cancelReducers));
+                }
+
+                break;
+            }
+
+            case PHASE_COMPLETE: {
+                if (log.isDebugEnabled())
+                    log.debug("Job execution is complete, will remove local state from active jobs " +
+                        "[jobId=" + jobId + ", meta=" + meta + ']');
+
+                if (state != null) {
+                    state = activeJobs.remove(jobId);
+
+                    assert state != null;
+
+                    ctx.shuffle().jobFinished(jobId);
+                }
+
+                GridFutureAdapter<HadoopJobId> finishFut = activeFinishFuts.remove(jobId);
+
+                if (finishFut != null) {
+                    if (log.isDebugEnabled())
+                        log.debug("Completing job future [locNodeId=" + locNodeId + ", meta=" + meta + ']');
+
+                    finishFut.onDone(jobId, meta.failCause());
+                }
+
+                assert job != null;
+
+                if (ctx.jobUpdateLeader())
+                    job.cleanupStagingDirectory();
+
+                jobs.remove(jobId);
+
+                if (ctx.jobUpdateLeader()) {
+                    ClassLoader ldr = job.getClass().getClassLoader();
+
+                    try {
+                        String statWriterClsName = job.info().property(HadoopUtils.JOB_COUNTER_WRITER_PROPERTY);
+
+                        if (statWriterClsName != null) {
+                            Class<?> cls = ldr.loadClass(statWriterClsName);
+
+                            HadoopCounterWriter writer = (HadoopCounterWriter)cls.newInstance();
+
+                            HadoopCounters cntrs = meta.counters();
+
+                            writer.write(job, cntrs);
+                        }
+                    }
+                    catch (Exception e) {
+                        log.error("Can't write statistic due to: ", e);
+                    }
+                }
+
+                job.dispose(false);
+
+                break;
+            }
+
+            default:
+                throw new IllegalStateException("Unknown phase: " + meta.phase());
+        }
+    }
+
+    /**
+     * Creates setup task based on job information.
+     *
+     * @param jobId Job ID.
+     * @return Setup task wrapped in collection or {@code null} if the job is already active locally.
+     */
+    @Nullable private Collection<HadoopTaskInfo> setupTask(HadoopJobId jobId) {
+        if (activeJobs.containsKey(jobId))
+            return null;
+        else {
+            initState(jobId);
+
+            return Collections.singleton(new HadoopTaskInfo(SETUP, jobId, 0, 0, null));
+        }
+    }
+
+    /**
+     * Creates mapper tasks based on job information.
+     *
+     * @param mappers Mapper blocks.
+     * @param meta Job metadata.
+     * @return Collection of created task infos or {@code null} if no mapper tasks scheduled for local node.
+     */
+    private Collection<HadoopTaskInfo> mapperTasks(Iterable<HadoopInputSplit> mappers, HadoopJobMetadata meta) {
+        UUID locNodeId = ctx.localNodeId();
+        HadoopJobId jobId = meta.jobId();
+
+        JobLocalState state = activeJobs.get(jobId);
+
+        Collection<HadoopTaskInfo> tasks = null;
+
+        if (mappers != null) {
+            if (state == null)
+                state = initState(jobId);
+
+            for (HadoopInputSplit split : mappers) {
+                if (state.addMapper(split)) {
+                    if (log.isDebugEnabled())
+                        log.debug("Submitting MAP task for execution [locNodeId=" + locNodeId +
+                            ", split=" + split + ']');
+
+                    HadoopTaskInfo taskInfo = new HadoopTaskInfo(MAP, jobId, meta.taskNumber(split), 0, split);
+
+                    if (tasks == null)
+                        tasks = new ArrayList<>();
+
+                    tasks.add(taskInfo);
+                }
+            }
+        }
+
+        return tasks;
+    }
+
+    /**
+     * Creates reducer tasks based on job information.
+     *
+     * @param reducers Reducers (may be {@code null}).
+     * @param job Job instance.
+     * @return Collection of created task infos or {@code null} if no reducer tasks scheduled for local node.
+     */
+    private Collection<HadoopTaskInfo> reducerTasks(int[] reducers, HadoopJob job) {
+        UUID locNodeId = ctx.localNodeId();
+        HadoopJobId jobId = job.id();
+
+        JobLocalState state = activeJobs.get(jobId);
+
+        Collection<HadoopTaskInfo> tasks = null;
+
+        if (reducers != null) {
+            if (state == null)
+                state = initState(job.id());
+
+            for (int rdc : reducers) {
+                if (state.addReducer(rdc)) {
+                    if (log.isDebugEnabled())
+                        log.debug("Submitting REDUCE task for execution [locNodeId=" + locNodeId +
+                            ", rdc=" + rdc + ']');
+
+                    HadoopTaskInfo taskInfo = new HadoopTaskInfo(REDUCE, jobId, rdc, 0, null);
+
+                    if (tasks == null)
+                        tasks = new ArrayList<>();
+
+                    tasks.add(taskInfo);
+                }
+            }
+        }
+
+        return tasks;
+    }
+
+    /**
+     * Initializes local state for given job metadata.
+     *
+     * @param jobId Job ID.
+     * @return Local state.
+     */
+    private JobLocalState initState(HadoopJobId jobId) {
+        return F.addIfAbsent(activeJobs, jobId, new JobLocalState());
+    }
+
+    /**
+     * Gets or creates job instance.
+     *
+     * @param jobId Job ID.
+     * @param jobInfo Job info.
+     * @return Job.
+     * @throws IgniteCheckedException If failed.
+     */
+    @Nullable public HadoopJob job(HadoopJobId jobId, @Nullable HadoopJobInfo jobInfo) throws IgniteCheckedException {
+        GridFutureAdapter<HadoopJob> fut = jobs.get(jobId);
+
+        if (fut != null || (fut = jobs.putIfAbsent(jobId, new GridFutureAdapter<HadoopJob>())) != null)
+            return fut.get();
+
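+        // This thread won the race and owns the freshly inserted future: create the job and complete it.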
+        fut = jobs.get(jobId);
+
+        HadoopJob job = null;
+
+        try {
+            if (jobInfo == null) {
+                HadoopJobMetadata meta = jobMetaCache().get(jobId);
+
+                if (meta == null)
+                    throw new IgniteCheckedException("Failed to find job metadata for ID: " + jobId);
+
+                jobInfo = meta.jobInfo();
+            }
+
+            job = jobInfo.createJob(jobCls, jobId, log, ctx.configuration().getNativeLibraryNames());
+
+            job.initialize(false, ctx.localNodeId());
+
+            fut.onDone(job);
+
+            return job;
+        }
+        catch (IgniteCheckedException e) {
+            fut.onDone(e);
+
+            jobs.remove(jobId, fut);
+
+            if (job != null) {
+                try {
+                    job.dispose(false);
+                }
+                catch (IgniteCheckedException e0) {
+                    U.error(log, "Failed to dispose job: " + jobId, e0);
+                }
+            }
+
+            throw e;
+        }
+    }
+
+    /**
+     * Kills job.
+     *
+     * @param jobId Job ID.
+     * @return {@code True} if job was killed.
+     * @throws IgniteCheckedException If failed.
+     */
+    public boolean killJob(HadoopJobId jobId) throws IgniteCheckedException {
+        if (!busyLock.tryReadLock())
+            return false; // Grid is stopping.
+
+        try {
+            HadoopJobMetadata meta = jobMetaCache().get(jobId);
+
+            if (meta != null && meta.phase() != PHASE_COMPLETE && meta.phase() != PHASE_CANCELLING) {
+                HadoopTaskCancelledException err = new HadoopTaskCancelledException("Job cancelled.");
+
+                jobMetaCache().invoke(jobId, new CancelJobProcessor(null, err));
+            }
+        }
+        finally {
+            busyLock.readUnlock();
+        }
+
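+        // Wait for the job to finish; report success only if it completed due to cancellation.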
+        IgniteInternalFuture<?> fut = finishFuture(jobId);
+
+        if (fut != null) {
+            try {
+                fut.get();
+            }
+            catch (Exception e) {
+                if (e.getCause() instanceof HadoopTaskCancelledException)
+                    return true;
+            }
+        }
+
+        return false;
+    }
+
+    /**
+     * Returns job counters.
+     *
+     * @param jobId Job identifier.
+     * @return Job counters or {@code null} if job cannot be found.
+     * @throws IgniteCheckedException If failed.
+     */
+    @Nullable public HadoopCounters jobCounters(HadoopJobId jobId) throws IgniteCheckedException {
+        if (!busyLock.tryReadLock())
+            return null;
+
+        try {
+            final HadoopJobMetadata meta = jobMetaCache().get(jobId);
+
+            return meta != null ? meta.counters() : null;
+        }
+        finally {
+            busyLock.readUnlock();
+        }
+    }
+
+    /**
+     * Event handler protected by busy lock.
+     */
+    private abstract class EventHandler implements Runnable {
+        /** {@inheritDoc} */
+        @Override public void run() {
+            if (!busyLock.tryReadLock())
+                return;
+
+            try {
+                body();
+            }
+            catch (Throwable e) {
+                U.error(log, "Unhandled exception while processing event.", e);
+
+                if (e instanceof Error)
+                    throw (Error)e;
+            }
+            finally {
+                busyLock.readUnlock();
+            }
+        }
+
+        /**
+         * Handler body.
+         */
+        protected abstract void body() throws Exception;
+    }
+
+    /**
+     *
+     */
+    private class JobLocalState {
+        /** Mappers. */
+        private final Collection<HadoopInputSplit> currMappers = new HashSet<>();
+
+        /** Reducers. */
+        private final Collection<Integer> currReducers = new HashSet<>();
+
+        /** Number of completed mappers. */
+        private final AtomicInteger completedMappersCnt = new AtomicInteger();
+
+        /** Cancelled flag. */
+        private boolean cancelled;
+
+        /** Aborted flag. */
+        private boolean aborted;
+
+        /**
+         * @param mapSplit Map split to add.
+         * @return {@code True} if mapper was added.
+         */
+        private boolean addMapper(HadoopInputSplit mapSplit) {
+            return currMappers.add(mapSplit);
+        }
+
+        /**
+         * @param rdc Reducer number to add.
+         * @return {@code True} if reducer was added.
+         */
+        private boolean addReducer(int rdc) {
+            return currReducers.add(rdc);
+        }
+
+        /**
+         * Checks whether the given map split was already scheduled on this node.
+         *
+         * @param mapSplit Map split to check.
+         * @return {@code True} if mapper was scheduled.
+         */
+        public boolean mapperScheduled(HadoopInputSplit mapSplit) {
+            return currMappers.contains(mapSplit);
+        }
+
+        /**
+         * Checks whether the given reducer was already scheduled on this node.
+         *
+         * @param rdc Reducer number to check.
+         * @return {@code True} if reducer was scheduled.
+         */
+        public boolean reducerScheduled(int rdc) {
+            return currReducers.contains(rdc);
+        }
+
+        /**
+         * @param taskInfo Task info.
+         * @param status Task status.
+         * @param prev Previous closure.
+         */
+        private void onSetupFinished(final HadoopTaskInfo taskInfo, HadoopTaskStatus status, StackedProcessor prev) {
+            final HadoopJobId jobId = taskInfo.jobId();
+
+            if (status.state() == FAILED || status.state() == CRASHED)
+                transform(jobId, new CancelJobProcessor(prev, status.failCause()));
+            else
+                transform(jobId, new UpdatePhaseProcessor(prev, PHASE_MAP));
+        }
+
+        /**
+         * @param taskInfo Task info.
+         * @param status Task status.
+         * @param prev Previous closure.
+         */
+        private void onMapFinished(final HadoopTaskInfo taskInfo, HadoopTaskStatus status,
+            final StackedProcessor prev) {
+            final HadoopJobId jobId = taskInfo.jobId();
+
+            boolean lastMapperFinished = completedMappersCnt.incrementAndGet() == currMappers.size();
+
+            if (status.state() == FAILED || status.state() == CRASHED) {
+                // Fail the whole job.
+                transform(jobId, new RemoveMappersProcessor(prev, taskInfo.inputSplit(), status.failCause()));
+
+                return;
+            }
+
+            IgniteInClosure<IgniteInternalFuture<?>> cacheUpdater = new CIX1<IgniteInternalFuture<?>>() {
+                @Override public void applyx(IgniteInternalFuture<?> f) {
+                    Throwable err = null;
+
+                    if (f != null) {
+                        try {
+                            f.get();
+                        }
+                        catch (IgniteCheckedException e) {
+                            err = e;
+                        }
+                    }
+
+                    transform(jobId, new RemoveMappersProcessor(prev, taskInfo.inputSplit(), err));
+                }
+            };
+
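+            // Flush shuffle output only after the last mapper on this node has finished;
+            // earlier mappers update the job metadata right away.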
+            if (lastMapperFinished)
+                ctx.shuffle().flush(jobId).listen(cacheUpdater);
+            else
+                cacheUpdater.apply(null);
+        }
+
+        /**
+         * @param taskInfo Task info.
+         * @param status Task status.
+         * @param prev Previous closure.
+         */
+        private void onReduceFinished(HadoopTaskInfo taskInfo, HadoopTaskStatus status, StackedProcessor prev) {
+            HadoopJobId jobId = taskInfo.jobId();
+            if (status.state() == FAILED || status.state() == CRASHED)
+                // Fail the whole job.
+                transform(jobId, new RemoveReducerProcessor(prev, taskInfo.taskNumber(), status.failCause()));
+            else
+                transform(jobId, new RemoveReducerProcessor(prev, taskInfo.taskNumber()));
+        }
+
+        /**
+         * @param taskInfo Task info.
+         * @param status Task status.
+         * @param prev Previous closure.
+         */
+        private void onCombineFinished(HadoopTaskInfo taskInfo, HadoopTaskStatus status,
+            final StackedProcessor prev) {
+            final HadoopJobId jobId = taskInfo.jobId();
+
+            if (status.state() == FAILED || status.state() == CRASHED)
+                // Fail the whole job.
+                transform(jobId, new RemoveMappersProcessor(prev, currMappers, status.failCause()));
+            else {
+                ctx.shuffle().flush(jobId).listen(new CIX1<IgniteInternalFuture<?>>() {
+                    @Override public void applyx(IgniteInternalFuture<?> f) {
+                        Throwable err = null;
+
+                        if (f != null) {
+                            try {
+                                f.get();
+                            }
+                            catch (IgniteCheckedException e) {
+                                err = e;
+                            }
+                        }
+
+                        transform(jobId, new RemoveMappersProcessor(prev, currMappers, err));
+                    }
+                });
+            }
+        }
+
+        /**
+         * @return {@code True} if job was cancelled by this (first) call.
+         */
+        public boolean onCancel() {
+            if (!cancelled && !aborted) {
+                cancelled = true;
+
+                return true;
+            }
+
+            return false;
+        }
+
+        /**
+         * @return {@code True} if job was aborted by this (first) call.
+         */
+        public boolean onAborted() {
+            if (!aborted) {
+                aborted = true;
+
+                return true;
+            }
+
+            return false;
+        }
+    }
+
+    /**
+     * Update job phase transform closure.
+     */
+    private static class UpdatePhaseProcessor extends StackedProcessor {
+        /** */
+        private static final long serialVersionUID = 0L;
+
+        /** Phase to update. */
+        private final HadoopJobPhase phase;
+
+        /**
+         * @param prev Previous closure.
+         * @param phase Phase to update.
+         */
+        private UpdatePhaseProcessor(@Nullable StackedProcessor prev, HadoopJobPhase phase) {
+            super(prev);
+
+            this.phase = phase;
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void update(HadoopJobMetadata meta, HadoopJobMetadata cp) {
+            cp.phase(phase);
+        }
+    }
+
+    /**
+     * Remove mapper transform closure.
+     */
+    private static class RemoveMappersProcessor extends StackedProcessor {
+        /** */
+        private static final long serialVersionUID = 0L;
+
+        /** Mapper splits to remove. */
+        private final Collection<HadoopInputSplit> splits;
+
+        /** Error. */
+        private final Throwable err;
+
+        /**
+         * @param prev Previous closure.
+         * @param split Mapper split to remove.
+         * @param err Error.
+         */
+        private RemoveMappersProcessor(@Nullable StackedProcessor prev, HadoopInputSplit split, Throwable err) {
+            this(prev, Collections.singletonList(split), err);
+        }
+
+        /**
+         * @param prev Previous closure.
+         * @param splits Mapper splits to remove.
+         * @param err Error.
+         */
+        private RemoveMappersProcessor(@Nullable StackedProcessor prev, Collection<HadoopInputSplit> splits,
+            Throwable err) {
+            super(prev);
+
+            this.splits = splits;
+            this.err = err;
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void update(HadoopJobMetadata meta, HadoopJobMetadata cp) {
+            Map<HadoopInputSplit, Integer> splitsCp = new HashMap<>(cp.pendingSplits());
+
+            for (HadoopInputSplit s : splits)
+                splitsCp.remove(s);
+
+            cp.pendingSplits(splitsCp);
+
+            if (cp.phase() != PHASE_CANCELLING && err != null)
+                cp.failCause(err);
+
+            if (err != null)
+                cp.phase(PHASE_CANCELLING);
+
+            if (splitsCp.isEmpty()) {
+                if (cp.phase() != PHASE_CANCELLING)
+                    cp.phase(PHASE_REDUCE);
+            }
+        }
+    }
+
+    /**
+     * Remove reducer transform closure.
+     */
+    private static class RemoveReducerProcessor extends StackedProcessor {
+        /** */
+        private static final long serialVersionUID = 0L;
+
+        /** Reducer to remove. */
+        private final int rdc;
+
+        /** Error. */
+        private Throwable err;
+
+        /**
+         * @param prev Previous closure.
+         * @param rdc Reducer to remove.
+         */
+        private RemoveReducerProcessor(@Nullable StackedProcessor prev, int rdc) {
+            super(prev);
+
+            this.rdc = rdc;
+        }
+
+        /**
+         * @param prev Previous closure.
+         * @param rdc Reducer to remove.
+         * @param err Error.
+         */
+        private RemoveReducerProcessor(@Nullable StackedProcessor prev, int rdc, Throwable err) {
+            super(prev);
+
+            this.rdc = rdc;
+            this.err = err;
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void update(HadoopJobMetadata meta, HadoopJobMetadata cp) {
+            Collection<Integer> rdcCp = new HashSet<>(cp.pendingReducers());
+
+            rdcCp.remove(rdc);
+
+            cp.pendingReducers(rdcCp);
+
+            if (err != null) {
+                cp.phase(PHASE_CANCELLING);
+                cp.failCause(err);
+            }
+        }
+    }
+
+    /**
+     * Initialize reducers.
+     */
+    private static class InitializeReducersProcessor extends StackedProcessor {
+        /** */
+        private static final long serialVersionUID = 0L;
+
+        /** Reducers. */
+        private final Collection<Integer> rdc;
+
+        /** Process descriptor for reducers. */
+        private final HadoopProcessDescriptor desc;
+
+        /**
+         * @param prev Previous closure.
+         * @param rdc Reducers to initialize.
+         * @param desc External process descriptor.
+         */
+        private InitializeReducersProcessor(@Nullable StackedProcessor prev,
+            Collection<Integer> rdc,
+            HadoopProcessDescriptor desc) {
+            super(prev);
+
+            assert !F.isEmpty(rdc);
+            assert desc != null;
+
+            this.rdc = rdc;
+            this.desc = desc;
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void update(HadoopJobMetadata meta, HadoopJobMetadata cp) {
+            Map<Integer, HadoopProcessDescriptor> oldMap = meta.reducersAddresses();
+
+            Map<Integer, HadoopProcessDescriptor> rdcMap = oldMap == null ?
+                new HashMap<Integer, HadoopProcessDescriptor>() : new HashMap<>(oldMap);
+
+            for (Integer r : rdc)
+                rdcMap.put(r, desc);
+
+            cp.reducersAddresses(rdcMap);
+        }
+    }
+
+    /**
+     * Cancel job transform closure.
+     */
+    private static class CancelJobProcessor extends StackedProcessor {
+        /** */
+        private static final long serialVersionUID = 0L;
+
+        /** Mapper splits to remove. */
+        private final Collection<HadoopInputSplit> splits;
+
+        /** Reducers to remove. */
+        private final Collection<Integer> rdc;
+
+        /** Error. */
+        private final Throwable err;
+
+        /**
+         * @param prev Previous closure.
+         * @param err Fail cause.
+         */
+        private CancelJobProcessor(@Nullable StackedProcessor prev, Throwable err) {
+            this(prev, err, null, null);
+        }
+
+        /**
+         * @param prev Previous closure.
+         * @param splits Splits to remove.
+         * @param rdc Reducers to remove.
+         */
+        private CancelJobProcessor(@Nullable StackedProcessor prev,
+            Collection<HadoopInputSplit> splits,
+            Collection<Integer> rdc) {
+            this(prev, null, splits, rdc);
+        }
+
+        /**
+         * @param prev Previous closure.
+         * @param err Error.
+         * @param splits Splits to remove.
+         * @param rdc Reducers to remove.
+         */
+        private CancelJobProcessor(@Nullable StackedProcessor prev,
+            Throwable err,
+            Collection<HadoopInputSplit> splits,
+            Collection<Integer> rdc) {
+            super(prev);
+
+            this.splits = splits;
+            this.rdc = rdc;
+            this.err = err;
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void update(HadoopJobMetadata meta, HadoopJobMetadata cp) {
+            final HadoopJobPhase currPhase = meta.phase();
+
+            assert currPhase == PHASE_CANCELLING || currPhase == PHASE_COMPLETE
+                || err != null : "Invalid phase for cancel: " + currPhase;
+
+            Collection<Integer> rdcCp = new HashSet<>(cp.pendingReducers());
+
+            if (rdc != null)
+                rdcCp.removeAll(rdc);
+
+            cp.pendingReducers(rdcCp);
+
+            Map<HadoopInputSplit, Integer> splitsCp = new HashMap<>(cp.pendingSplits());
+
+            if (splits != null) {
+                for (HadoopInputSplit s : splits)
+                    splitsCp.remove(s);
+            }
+
+            cp.pendingSplits(splitsCp);
+
+            if (currPhase != PHASE_COMPLETE && currPhase != PHASE_CANCELLING)
+                cp.phase(PHASE_CANCELLING);
+
+            if (err != null)
+                cp.failCause(err);
+        }
+    }
+
+    /**
+     * Increment counter values closure.
+     */
+    private static class IncrementCountersProcessor extends StackedProcessor {
+        /** */
+        private static final long serialVersionUID = 0L;
+
+        /** */
+        private final HadoopCounters counters;
+
+        /**
+         * @param prev Previous closure.
+         * @param counters Task counters to add into job counters.
+         */
+        private IncrementCountersProcessor(@Nullable StackedProcessor prev, HadoopCounters counters) {
+            super(prev);
+
+            assert counters != null;
+
+            this.counters = counters;
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void update(HadoopJobMetadata meta, HadoopJobMetadata cp) {
+            HadoopCounters cntrs = new HadoopCountersImpl(cp.counters());
+
+            cntrs.merge(counters);
+
+            cp.counters(cntrs);
+        }
+    }
+
+    /**
+     * Abstract stacked closure.
+     */
+    private abstract static class StackedProcessor implements
+        EntryProcessor<HadoopJobId, HadoopJobMetadata, Void>, Serializable {
+        /** */
+        private static final long serialVersionUID = 0L;
+
+        /** */
+        private final StackedProcessor prev;
+
+        /**
+         * @param prev Previous closure.
+         */
+        private StackedProcessor(@Nullable StackedProcessor prev) {
+            this.prev = prev;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Void process(MutableEntry<HadoopJobId, HadoopJobMetadata> e, Object... args) {
+            HadoopJobMetadata val = apply(e.getValue());
+
+            if (val != null)
+                e.setValue(val);
+            else
+                e.remove();
+
+            return null;
+        }
+
+        /**
+         * @param meta Old value.
+         * @return New value.
+         */
+        private HadoopJobMetadata apply(HadoopJobMetadata meta) {
+            if (meta == null)
+                return null;
+
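+            // Recurse to the bottom of the stack first: the oldest processor copies the metadata,
+            // then each update() is applied on the way back up, newest last.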
+            HadoopJobMetadata cp = prev != null ? prev.apply(meta) : new HadoopJobMetadata(meta);
+
+            update(meta, cp);
+
+            return cp;
+        }
+
+        /**
+         * Update given job metadata object.
+         *
+         * @param meta Initial job metadata.
+         * @param cp Copy.
+         */
+        protected abstract void update(HadoopJobMetadata meta, HadoopJobMetadata cp);
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/message/HadoopMessage.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/message/HadoopMessage.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/message/HadoopMessage.java
new file mode 100644
index 0000000..0d7bd3a
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/message/HadoopMessage.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.message;
+
+import java.io.Externalizable;
+
+/**
+ * Marker interface for all hadoop messages.
+ */
+public interface HadoopMessage extends Externalizable {
+    // No-op.
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopClientProtocol.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopClientProtocol.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopClientProtocol.java
new file mode 100644
index 0000000..5f96e08
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopClientProtocol.java
@@ -0,0 +1,349 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.proto;
+
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.mapreduce.Cluster;
+import org.apache.hadoop.mapreduce.ClusterMetrics;
+import org.apache.hadoop.mapreduce.Counters;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.QueueAclsInfo;
+import org.apache.hadoop.mapreduce.QueueInfo;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.TaskCompletionEvent;
+import org.apache.hadoop.mapreduce.TaskReport;
+import org.apache.hadoop.mapreduce.TaskTrackerInfo;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
+import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.mapreduce.v2.LogParams;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.token.Token;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.client.GridClient;
+import org.apache.ignite.internal.client.GridClientException;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobProperty;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobStatus;
+import org.apache.ignite.internal.processors.hadoop.HadoopMapReduceCounters;
+import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.JOB_SUBMISSION_START_TS_PROPERTY;
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.REQ_NEW_JOBID_TS_PROPERTY;
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.RESPONSE_NEW_JOBID_TS_PROPERTY;
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.createJobInfo;
+
+/**
+ * Hadoop client protocol.
+ */
+public class HadoopClientProtocol implements ClientProtocol {
+    /** Protocol version. */
+    private static final long PROTO_VER = 1L;
+
+    /** Default Ignite system directory. */
+    private static final String SYS_DIR = ".ignite/system";
+
+    /** Configuration. */
+    private final Configuration conf;
+
+    /** Ignite client. */
+    private volatile GridClient cli;
+
+    /** Last received version. */
+    private long lastVer = -1;
+
+    /** Last received status. */
+    private HadoopJobStatus lastStatus;
+
+    /**
+     * Constructor.
+     *
+     * @param conf Configuration.
+     * @param cli Ignite client.
+     */
+    public HadoopClientProtocol(Configuration conf, GridClient cli) {
+        assert cli != null;
+
+        this.conf = conf;
+        this.cli = cli;
+    }
+
+    /** {@inheritDoc} */
+    @Override public JobID getNewJobID() throws IOException, InterruptedException {
+        try {
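+            // The request/response timestamps recorded in the configuration are later
+            // consumed by the client-side submission performance counters.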
+            conf.setLong(REQ_NEW_JOBID_TS_PROPERTY, U.currentTimeMillis());
+
+            HadoopJobId jobID = cli.compute().execute(HadoopProtocolNextTaskIdTask.class.getName(), null);
+
+            conf.setLong(RESPONSE_NEW_JOBID_TS_PROPERTY, U.currentTimeMillis());
+
+            return new JobID(jobID.globalId().toString(), jobID.localId());
+        }
+        catch (GridClientException e) {
+            throw new IOException("Failed to get new job ID.", e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public JobStatus submitJob(JobID jobId, String jobSubmitDir, Credentials ts) throws IOException,
+        InterruptedException {
+        try {
+            conf.setLong(JOB_SUBMISSION_START_TS_PROPERTY, U.currentTimeMillis());
+
+            HadoopJobStatus status = cli.compute().execute(HadoopProtocolSubmitJobTask.class.getName(),
+                new HadoopProtocolTaskArguments(jobId.getJtIdentifier(), jobId.getId(), createJobInfo(conf)));
+
+            if (status == null)
+                throw new IOException("Failed to submit job (null status obtained): " + jobId);
+
+            return processStatus(status);
+        }
+        catch (GridClientException | IgniteCheckedException e) {
+            throw new IOException("Failed to submit job.", e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public ClusterMetrics getClusterMetrics() throws IOException, InterruptedException {
+        return new ClusterMetrics(0, 0, 0, 0, 0, 0, 1000, 1000, 1, 100, 0, 0);
+    }
+
+    /** {@inheritDoc} */
+    @Override public Cluster.JobTrackerStatus getJobTrackerStatus() throws IOException, InterruptedException {
+        return Cluster.JobTrackerStatus.RUNNING;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getTaskTrackerExpiryInterval() throws IOException, InterruptedException {
+        return 0;
+    }
+
+    /** {@inheritDoc} */
+    @Override public AccessControlList getQueueAdmins(String queueName) throws IOException {
+        return new AccessControlList("*");
+    }
+
+    /** {@inheritDoc} */
+    @Override public void killJob(JobID jobId) throws IOException, InterruptedException {
+        try {
+            cli.compute().execute(HadoopProtocolKillJobTask.class.getName(),
+                new HadoopProtocolTaskArguments(jobId.getJtIdentifier(), jobId.getId()));
+        }
+        catch (GridClientException e) {
+            throw new IOException("Failed to kill job: " + jobId, e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setJobPriority(JobID jobid, String priority) throws IOException, InterruptedException {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean killTask(TaskAttemptID taskId, boolean shouldFail) throws IOException,
+        InterruptedException {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public JobStatus getJobStatus(JobID jobId) throws IOException, InterruptedException {
+        try {
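+            // A non-negative poll delay is forwarded to the server-side status task,
+            // presumably letting it wait for a state change before replying.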
+            Long delay = conf.getLong(HadoopJobProperty.JOB_STATUS_POLL_DELAY.propertyName(), -1);
+
+            HadoopProtocolTaskArguments args = delay >= 0 ?
+                new HadoopProtocolTaskArguments(jobId.getJtIdentifier(), jobId.getId(), delay) :
+                new HadoopProtocolTaskArguments(jobId.getJtIdentifier(), jobId.getId());
+
+            HadoopJobStatus status = cli.compute().execute(HadoopProtocolJobStatusTask.class.getName(), args);
+
+            if (status == null)
+                throw new IOException("Job tracker doesn't have any information about the job: " + jobId);
+
+            return processStatus(status);
+        }
+        catch (GridClientException e) {
+            throw new IOException("Failed to get job status: " + jobId, e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public Counters getJobCounters(JobID jobId) throws IOException, InterruptedException {
+        try {
+            final HadoopCounters counters = cli.compute().execute(HadoopProtocolJobCountersTask.class.getName(),
+                new HadoopProtocolTaskArguments(jobId.getJtIdentifier(), jobId.getId()));
+
+            if (counters == null)
+                throw new IOException("Job tracker doesn't have any information about the job: " + jobId);
+
+            return new HadoopMapReduceCounters(counters);
+        }
+        catch (GridClientException e) {
+            throw new IOException("Failed to get job counters: " + jobId, e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public TaskReport[] getTaskReports(JobID jobid, TaskType type) throws IOException, InterruptedException {
+        return new TaskReport[0];
+    }
+
+    /** {@inheritDoc} */
+    @Override public String getFilesystemName() throws IOException, InterruptedException {
+        return FileSystem.get(conf).getUri().toString();
+    }
+
+    /** {@inheritDoc} */
+    @Override public JobStatus[] getAllJobs() throws IOException, InterruptedException {
+        return new JobStatus[0];
+    }
+
+    /** {@inheritDoc} */
+    @Override public TaskCompletionEvent[] getTaskCompletionEvents(JobID jobid, int fromEventId, int maxEvents)
+        throws IOException, InterruptedException {
+        return new TaskCompletionEvent[0];
+    }
+
+    /** {@inheritDoc} */
+    @Override public String[] getTaskDiagnostics(TaskAttemptID taskId) throws IOException, InterruptedException {
+        return new String[0];
+    }
+
+    /** {@inheritDoc} */
+    @Override public TaskTrackerInfo[] getActiveTrackers() throws IOException, InterruptedException {
+        return new TaskTrackerInfo[0];
+    }
+
+    /** {@inheritDoc} */
+    @Override public TaskTrackerInfo[] getBlacklistedTrackers() throws IOException, InterruptedException {
+        return new TaskTrackerInfo[0];
+    }
+
+    /** {@inheritDoc} */
+    @Override public String getSystemDir() throws IOException, InterruptedException {
+        Path sysDir = new Path(SYS_DIR);
+
+        return sysDir.toString();
+    }
+
+    /** {@inheritDoc} */
+    @Override public String getStagingAreaDir() throws IOException, InterruptedException {
+        String usr = UserGroupInformation.getCurrentUser().getShortUserName();
+
+        return HadoopUtils.stagingAreaDir(conf, usr).toString();
+    }
+
+    /** {@inheritDoc} */
+    @Override public String getJobHistoryDir() throws IOException, InterruptedException {
+        return JobHistoryUtils.getConfiguredHistoryServerDoneDirPrefix(conf);
+    }
+
+    /** {@inheritDoc} */
+    @Override public QueueInfo[] getQueues() throws IOException, InterruptedException {
+        return new QueueInfo[0];
+    }
+
+    /** {@inheritDoc} */
+    @Override public QueueInfo getQueue(String queueName) throws IOException, InterruptedException {
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException, InterruptedException {
+        return new QueueAclsInfo[0];
+    }
+
+    /** {@inheritDoc} */
+    @Override public QueueInfo[] getRootQueues() throws IOException, InterruptedException {
+        return new QueueInfo[0];
+    }
+
+    /** {@inheritDoc} */
+    @Override public QueueInfo[] getChildQueues(String queueName) throws IOException, InterruptedException {
+        return new QueueInfo[0];
+    }
+
+    /** {@inheritDoc} */
+    @Override public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer) throws IOException,
+        InterruptedException {
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long renewDelegationToken(Token<DelegationTokenIdentifier> token) throws IOException,
+        InterruptedException {
+        return 0;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void cancelDelegationToken(Token<DelegationTokenIdentifier> token) throws IOException,
+        InterruptedException {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID) throws IOException,
+        InterruptedException {
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getProtocolVersion(String protocol, long clientVersion) throws IOException {
+        return PROTO_VER;
+    }
+
+    /** {@inheritDoc} */
+    @Override public ProtocolSignature getProtocolSignature(String protocol, long clientVersion, int clientMethodsHash)
+        throws IOException {
+        return ProtocolSignature.getProtocolSignature(this, protocol, clientVersion, clientMethodsHash);
+    }
+
+    /**
+     * Process received status update.
+     *
+     * @param status Ignite status.
+     * @return Hadoop status.
+     */
+    private JobStatus processStatus(HadoopJobStatus status) {
+        // IMPORTANT! This method only works in a single-threaded environment. That holds at the moment because
+        // IgniteHadoopClientProtocolProvider creates a new instance of this class for every job, and the Job class
+        // serializes invocations of the submitJob() and getJobStatus() methods. However, if either condition changes
+        // in the future - the protocol starts serving statuses for several jobs, or status updates are no longer
+        // serialized - then we will have to fall back to a concurrent approach (e.g. using ConcurrentHashMap).
+        // (vozerov)
+        if (lastVer < status.version()) {
+            lastVer = status.version();
+
+            lastStatus = status;
+        }
+        else
+            assert lastStatus != null;
+
+        return HadoopUtils.status(lastStatus, conf);
+    }
+}
\ No newline at end of file
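
A minimal sketch of the concurrent fallback mentioned in the processStatus() comment above, assuming Java 8,
java.util.concurrent imports, and a HadoopJobStatus.jobId() accessor (assumptions for illustration only; this code is
not part of the commit):

    /** Freshest known status per job; tolerates concurrent updates. */
    private final ConcurrentMap<HadoopJobId, HadoopJobStatus> lastStatuses = new ConcurrentHashMap<>();

    /** Concurrent variant of processStatus(): keep whichever status carries the higher version. */
    private JobStatus processStatusConcurrent(HadoopJobStatus status) {
        HadoopJobStatus merged = lastStatuses.merge(status.jobId(), status,
            (oldStatus, newStatus) -> newStatus.version() > oldStatus.version() ? newStatus : oldStatus);

        return HadoopUtils.status(merged, conf);
    }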

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobCountersTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobCountersTask.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobCountersTask.java
new file mode 100644
index 0000000..8f0271c
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobCountersTask.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.proto;
+
+import java.util.UUID;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.compute.ComputeJobContext;
+import org.apache.ignite.internal.processors.hadoop.Hadoop;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
+
+/**
+ * Task to get job counters.
+ */
+public class HadoopProtocolJobCountersTask extends HadoopProtocolTaskAdapter<HadoopCounters> {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** {@inheritDoc} */
+    @Override public HadoopCounters run(ComputeJobContext jobCtx, Hadoop hadoop,
+        HadoopProtocolTaskArguments args) throws IgniteCheckedException {
+        UUID nodeId = UUID.fromString(args.<String>get(0));
+        Integer id = args.get(1);
+
+        assert nodeId != null;
+        assert id != null;
+
+        return hadoop.counters(new HadoopJobId(nodeId, id));
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobStatusTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobStatusTask.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobStatusTask.java
new file mode 100644
index 0000000..c08fe77
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobStatusTask.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.proto;
+
+import java.util.UUID;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.compute.ComputeJobContext;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.hadoop.Hadoop;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobStatus;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.lang.IgniteInClosure;
+
+/**
+ * Job status task.
+ */
+public class HadoopProtocolJobStatusTask extends HadoopProtocolTaskAdapter<HadoopJobStatus> {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** Default poll delay. */
+    private static final long DFLT_POLL_DELAY = 100L;
+
+    /** Attribute for held status. */
+    private static final String ATTR_HELD = "held";
+
+    /** {@inheritDoc} */
+    @Override public HadoopJobStatus run(final ComputeJobContext jobCtx, Hadoop hadoop,
+        HadoopProtocolTaskArguments args) throws IgniteCheckedException {
+        UUID nodeId = UUID.fromString(args.<String>get(0));
+        Integer id = args.get(1);
+        Long pollDelay = args.get(2);
+
+        assert nodeId != null;
+        assert id != null;
+
+        HadoopJobId jobId = new HadoopJobId(nodeId, id);
+
+        if (pollDelay == null)
+            pollDelay = DFLT_POLL_DELAY;
+
+        if (pollDelay > 0) {
+            IgniteInternalFuture<?> fut = hadoop.finishFuture(jobId);
+
+            if (fut != null) {
+                if (fut.isDone() || F.eq(jobCtx.getAttribute(ATTR_HELD), true))
+                    return hadoop.status(jobId);
+                else {
+                    fut.listen(new IgniteInClosure<IgniteInternalFuture<?>>() {
+                        @Override public void apply(IgniteInternalFuture<?> fut0) {
+                            jobCtx.callcc();
+                        }
+                    });
+
+                    jobCtx.setAttribute(ATTR_HELD, true);
+
+                    return jobCtx.holdcc(pollDelay);
+                }
+            }
+            else
+                return null;
+        }
+        else
+            return hadoop.status(jobId);
+    }
+}
\ No newline at end of file
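
HadoopProtocolJobStatusTask above relies on Ignite's job continuation API: holdcc() suspends the compute job without
blocking a worker thread, and callcc() resumes it once the finish future fires. The same idiom in isolation, where
someAsyncOperation() and computeResult() are placeholders rather than real APIs:

    IgniteInternalFuture<?> fut = someAsyncOperation();

    if (fut.isDone())
        return computeResult(); // Result is already available; no need to suspend.

    fut.listen(new IgniteInClosure<IgniteInternalFuture<?>>() {
        @Override public void apply(IgniteInternalFuture<?> fut0) {
            jobCtx.callcc(); // Resume the held job once the future completes.
        }
    });

    return jobCtx.holdcc(); // Suspend the job; the worker thread is released.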

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolKillJobTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolKillJobTask.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolKillJobTask.java
new file mode 100644
index 0000000..0f65664
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolKillJobTask.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.proto;
+
+import java.util.UUID;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.compute.ComputeJobContext;
+import org.apache.ignite.internal.processors.hadoop.Hadoop;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+
+/**
+ * Kill job task.
+ */
+public class HadoopProtocolKillJobTask extends HadoopProtocolTaskAdapter<Boolean> {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** {@inheritDoc} */
+    @Override public Boolean run(ComputeJobContext jobCtx, Hadoop hadoop,
+        HadoopProtocolTaskArguments args) throws IgniteCheckedException {
+        UUID nodeId = UUID.fromString(args.<String>get(0));
+        Integer id = args.get(1);
+
+        assert nodeId != null;
+        assert id != null;
+
+        HadoopJobId jobId = new HadoopJobId(nodeId, id);
+
+        return hadoop.kill(jobId);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolNextTaskIdTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolNextTaskIdTask.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolNextTaskIdTask.java
new file mode 100644
index 0000000..bde7821
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolNextTaskIdTask.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.proto;
+
+import org.apache.ignite.compute.ComputeJobContext;
+import org.apache.ignite.internal.processors.hadoop.Hadoop;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+
+/**
+ * Task to get the next job ID.
+ */
+public class HadoopProtocolNextTaskIdTask extends HadoopProtocolTaskAdapter<HadoopJobId> {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** {@inheritDoc} */
+    @Override public HadoopJobId run(ComputeJobContext jobCtx, Hadoop hadoop,
+        HadoopProtocolTaskArguments args) {
+        return hadoop.nextJobId();
+    }
+}
\ No newline at end of file


http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV2.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV2.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV2.java
new file mode 100644
index 0000000..2484492
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV2.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.fs;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ChecksumFs;
+import org.apache.hadoop.fs.DelegateToFileSystem;
+import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.local.LocalConfigKeys;
+
+import static org.apache.hadoop.fs.FsConstants.LOCAL_FS_URI;
+
+/**
+ * Local file system replacement for Hadoop jobs.
+ */
+public class HadoopLocalFileSystemV2 extends ChecksumFs {
+    /**
+     * Creates new local file system.
+     *
+     * @param cfg Configuration.
+     * @throws IOException If failed.
+     * @throws URISyntaxException If failed.
+     */
+    public HadoopLocalFileSystemV2(Configuration cfg) throws IOException, URISyntaxException {
+        super(new DelegateFS(cfg));
+    }
+
+    /**
+     * Creates new local file system.
+     *
+     * @param uri URI.
+     * @param cfg Configuration.
+     * @throws IOException If failed.
+     * @throws URISyntaxException If failed.
+     */
+    public HadoopLocalFileSystemV2(URI uri, Configuration cfg) throws IOException, URISyntaxException {
+        this(cfg);
+    }
+
+    /**
+     * Delegate file system.
+     */
+    private static class DelegateFS extends DelegateToFileSystem {
+        /**
+         * Creates new local file system.
+         *
+         * @param cfg Configuration.
+         * @throws IOException If failed.
+         * @throws URISyntaxException If failed.
+         */
+        public DelegateFS(Configuration cfg) throws IOException, URISyntaxException {
+            super(LOCAL_FS_URI, new HadoopRawLocalFileSystem(), cfg, LOCAL_FS_URI.getScheme(), false);
+        }
+
+        /** {@inheritDoc} */
+        @Override public int getUriDefaultPort() {
+            return -1;
+        }
+
+        /** {@inheritDoc} */
+        @Override public FsServerDefaults getServerDefaults() throws IOException {
+            return LocalConfigKeys.getServerDefaults();
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean isValidName(String src) {
+            return true;
+        }
+    }
+}
\ No newline at end of file
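
Since HadoopLocalFileSystemV2 extends the FileContext-side API (ChecksumFs), it would normally be wired in through
Hadoop's fs.AbstractFileSystem.<scheme>.impl configuration key. A hedged sketch, assuming the standard registration
mechanism is used:

    // Register the replacement for the "file" scheme and obtain a FileContext over it.
    Configuration conf = new Configuration();

    conf.set("fs.AbstractFileSystem.file.impl",
        "org.apache.ignite.internal.processors.hadoop.fs.HadoopLocalFileSystemV2");

    FileContext fc = FileContext.getFileContext(conf);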

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopParameters.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopParameters.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopParameters.java
new file mode 100644
index 0000000..0aac4a3
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopParameters.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.fs;
+
+/**
+ * This class lists parameters that can be specified in Hadoop configuration.
+ * Hadoop configuration can be specified in {@code core-site.xml} file
+ * or passed to map-reduce task directly when using Hadoop driver for IGFS file system:
+ * <ul>
+ *     <li>
+ *         {@code fs.igfs.[name].open.sequential_reads_before_prefetch} - this parameter overrides
+ *         the one specified in {@link org.apache.ignite.configuration.FileSystemConfiguration#getSequentialReadsBeforePrefetch()}
+ *         IGFS data node configuration property.
+ *     </li>
+ *     <li>
+ *         {@code fs.igfs.[name].log.enabled} - specifies whether IGFS sampling logger is enabled. If
+ *         {@code true}, then all file system operations will be logged to a file.
+ *     </li>
+ *     <li>{@code fs.igfs.[name].log.dir} - specifies log directory where sampling log files should be placed.</li>
+ *     <li>
+ *         {@code fs.igfs.[name].log.batch_size} - specifies how many log entries are accumulated in a batch before
+ *         it gets flushed to log file. Higher values will imply greater performance, but will increase delay
+ *         before record appears in the log file.
+ *     </li>
+ *     <li>
+ *         {@code fs.igfs.[name].colocated.writes} - specifies whether written files should be colocated on data
+ *         node to which client is connected. If {@code true}, file will not be distributed and will be written
+ *         to a single data node. Default value is {@code true}.
+ *     </li>
+ *     <li>
+ *         {@code fs.igfs.prefer.local.writes} - specifies whether a file should preferably be written to the
+ *         local data node if it has enough free space. It may be redistributed across nodes later, though.
+ *     </li>
+ * </ul>
+ * Where {@code [name]} is the file system endpoint specified in the authority part of the file system URI. E.g., if
+ * your file system URI is {@code igfs://127.0.0.1:10500}, then {@code name} will be {@code 127.0.0.1:10500}.
+ * <p>
+ * Sample configuration that can be placed to {@code core-site.xml} file:
+ * <pre name="code" class="xml">
+ *     &lt;property&gt;
+ *         &lt;name&gt;fs.igfs.127.0.0.1:10500.log.enabled&lt;/name&gt;
+ *         &lt;value&gt;true&lt;/value&gt;
+ *     &lt;/property&gt;
+ *     &lt;property&gt;
+ *         &lt;name&gt;fs.igfs.127.0.0.1:10500.log.dir&lt;/name&gt;
+ *         &lt;value&gt;/home/apache/ignite/log/sampling&lt;/value&gt;
+ *     &lt;/property&gt;
+ *     &lt;property&gt;
+ *         &lt;name&gt;fs.igfs.127.0.0.1:10500.log.batch_size&lt;/name&gt;
+ *         &lt;value&gt;16&lt;/value&gt;
+ *     &lt;/property&gt;
+ * </pre>
+ * Parameters can also be specified per map-reduce job, e.g.
+ * <pre name="code" class="bash">
+ * hadoop jar myjarfile.jar MyMapReduceJob -Dfs.igfs.open.sequential_reads_before_prefetch=4
+ * </pre>
+ * If you want to use these parameters in code, you have to substitute your file system name into them. The easiest
+ * way to do that is {@code String.format(PARAM_IGFS_COLOCATED_WRITES, name)}.
+ */
+public class HadoopParameters {
+    /** Parameter name for control over file colocation write mode. */
+    public static final String PARAM_IGFS_COLOCATED_WRITES = "fs.igfs.%s.colocated.writes";
+
+    /** Parameter name for custom sequential reads before prefetch value. */
+    public static final String PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH =
+        "fs.igfs.%s.open.sequential_reads_before_prefetch";
+
+    /** Parameter name for client logger directory. */
+    public static final String PARAM_IGFS_LOG_DIR = "fs.igfs.%s.log.dir";
+
+    /** Parameter name for log batch size. */
+    public static final String PARAM_IGFS_LOG_BATCH_SIZE = "fs.igfs.%s.log.batch_size";
+
+    /** Parameter name for log enabled flag. */
+    public static final String PARAM_IGFS_LOG_ENABLED = "fs.igfs.%s.log.enabled";
+
+    /** Parameter name for prefer local writes flag. */
+    public static final String PARAM_IGFS_PREFER_LOCAL_WRITES = "fs.igfs.prefer.local.writes";
+}
\ No newline at end of file
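
A short example of the substitution recommended at the end of the class Javadoc above (the endpoint name is an
assumption for illustration):

    // Build the concrete key from the template and set it in the Hadoop configuration.
    String name = "127.0.0.1:10500"; // Authority part of the file system URI.

    String key = String.format(HadoopParameters.PARAM_IGFS_COLOCATED_WRITES, name);

    Configuration conf = new Configuration();

    conf.setBoolean(key, true); // Key is "fs.igfs.127.0.0.1:10500.colocated.writes".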

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopRawLocalFileSystem.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopRawLocalFileSystem.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopRawLocalFileSystem.java
new file mode 100644
index 0000000..b8fc8e7
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopRawLocalFileSystem.java
@@ -0,0 +1,314 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.fs;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.RandomAccessFile;
+import java.net.URI;
+import java.nio.file.Files;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.Seekable;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Progressable;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+/**
+ * Local file system implementation for Hadoop.
+ */
+public class HadoopRawLocalFileSystem extends FileSystem {
+    /** Working directory for each thread. */
+    private final ThreadLocal<Path> workDir = new ThreadLocal<Path>() {
+        @Override protected Path initialValue() {
+            return getInitialWorkingDirectory();
+        }
+    };
+
+    /**
+     * Converts Hadoop path to local path.
+     *
+     * @param path Hadoop path.
+     * @return Local path.
+     */
+    File convert(Path path) {
+        checkPath(path);
+
+        if (path.isAbsolute())
+            return new File(path.toUri().getPath());
+
+        return new File(getWorkingDirectory().toUri().getPath(), path.toUri().getPath());
+    }
+
+    /** {@inheritDoc} */
+    @Override public Path getHomeDirectory() {
+        return makeQualified(new Path(System.getProperty("user.home")));
+    }
+
+    /** {@inheritDoc} */
+    @Override public Path getInitialWorkingDirectory() {
+        File f = new File(System.getProperty("user.dir"));
+
+        return new Path(f.getAbsoluteFile().toURI()).makeQualified(getUri(), null);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void initialize(URI uri, Configuration conf) throws IOException {
+        super.initialize(uri, conf);
+
+        setConf(conf);
+
+        String initWorkDir = conf.get(HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP);
+
+        if (initWorkDir != null)
+            setWorkingDirectory(new Path(initWorkDir));
+    }
+
+    /** {@inheritDoc} */
+    @Override public URI getUri() {
+        return FsConstants.LOCAL_FS_URI;
+    }
+
+    /** {@inheritDoc} */
+    @Override public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+        return new FSDataInputStream(new InStream(checkExists(convert(f))));
+    }
+
+    /** {@inheritDoc} */
+    @Override public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufSize,
+        short replication, long blockSize, Progressable progress) throws IOException {
+        File file = convert(f);
+
+        if (!overwrite && !file.createNewFile())
+            throw new IOException("Failed to create new file: " + f.toUri());
+
+        return out(file, false, bufSize);
+    }
+
+    /**
+     * @param file File.
+     * @param append Append flag.
+     * @param bufSize Buffer size.
+     * @return Output stream.
+     * @throws IOException If failed.
+     */
+    private FSDataOutputStream out(File file, boolean append, int bufSize) throws IOException {
+        return new FSDataOutputStream(new BufferedOutputStream(new FileOutputStream(file, append),
+            bufSize < 32 * 1024 ? 32 * 1024 : bufSize), new Statistics(getUri().getScheme()));
+    }
+
+    /** {@inheritDoc} */
+    @Override public FSDataOutputStream append(Path f, int bufSize, Progressable progress) throws IOException {
+        return out(convert(f), true, bufSize);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean rename(Path src, Path dst) throws IOException {
+        return convert(src).renameTo(convert(dst));
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean delete(Path f, boolean recursive) throws IOException {
+        File file = convert(f);
+
+        if (file.isDirectory() && !recursive)
+            throw new IOException("Failed to remove directory in non recursive mode: " + f.toUri());
+
+        return U.delete(file);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setWorkingDirectory(Path dir) {
+        workDir.set(fixRelativePart(dir));
+
+        checkPath(dir);
+    }
+
+    /** {@inheritDoc} */
+    @Override public Path getWorkingDirectory() {
+        return workDir.get();
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean mkdirs(Path f, FsPermission permission) throws IOException {
+        if (f == null)
+            throw new IllegalArgumentException("mkdirs path arg is null");
+
+        Path parent = f.getParent();
+
+        File p2f = convert(f);
+
+        if (parent != null) {
+            File parent2f = convert(parent);
+
+            if (parent2f != null && parent2f.exists() && !parent2f.isDirectory())
+                throw new FileAlreadyExistsException("Parent path is not a directory: " + parent);
+        }
+
+        return (parent == null || mkdirs(parent)) && (p2f.mkdir() || p2f.isDirectory());
+    }
+
+    /** {@inheritDoc} */
+    @Override public FileStatus getFileStatus(Path f) throws IOException {
+        return fileStatus(checkExists(convert(f)));
+    }
+
+    /**
+     * @param file File.
+     * @return File status.
+     */
+    private FileStatus fileStatus(File file) throws IOException {
+        boolean dir = file.isDirectory();
+
+        java.nio.file.Path path = dir ? null : file.toPath();
+
+        return new FileStatus(dir ? 0 : file.length(), dir, 1, 4 * 1024, file.lastModified(), file.lastModified(),
+            /*permission*/null, /*owner*/null, /*group*/null, dir ? null : Files.isSymbolicLink(path) ?
+            new Path(Files.readSymbolicLink(path).toUri()) : null, new Path(file.toURI()));
+    }
+
+    /**
+     * @param file File.
+     * @return Same file.
+     * @throws FileNotFoundException If the file does not exist.
+     */
+    private static File checkExists(File file) throws FileNotFoundException {
+        if (!file.exists())
+            throw new FileNotFoundException("File " + file.getAbsolutePath() + " does not exist.");
+
+        return file;
+    }
+
+    /** {@inheritDoc} */
+    @Override public FileStatus[] listStatus(Path f) throws IOException {
+        File file = convert(f);
+
+        if (checkExists(file).isFile())
+            return new FileStatus[] {fileStatus(file)};
+
+        File[] files = file.listFiles();
+
+        if (files == null)
+            throw new IOException("Failed to list files in directory: " + f.toUri());
+
+        FileStatus[] res = new FileStatus[files.length];
+
+        for (int i = 0; i < res.length; i++)
+            res[i] = fileStatus(files[i]);
+
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean supportsSymlinks() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void createSymlink(Path target, Path link, boolean createParent) throws IOException {
+        Files.createSymbolicLink(convert(link).toPath(), convert(target).toPath());
+    }
+
+    /** {@inheritDoc} */
+    @Override public FileStatus getFileLinkStatus(Path f) throws IOException {
+        return getFileStatus(getLinkTarget(f));
+    }
+
+    /** {@inheritDoc} */
+    @Override public Path getLinkTarget(Path f) throws IOException {
+        File file = Files.readSymbolicLink(convert(f).toPath()).toFile();
+
+        return new Path(file.toURI());
+    }
+
+    /**
+     * Input stream.
+     */
+    private static class InStream extends InputStream implements Seekable, PositionedReadable {
+        /** */
+        private final RandomAccessFile file;
+
+        /**
+         * @param f File.
+         * @throws IOException If failed.
+         */
+        public InStream(File f) throws IOException {
+            file = new RandomAccessFile(f, "r");
+        }
+
+        /** {@inheritDoc} */
+        @Override public synchronized int read() throws IOException {
+            return file.read();
+        }
+
+        /** {@inheritDoc} */
+        @Override public synchronized int read(byte[] b, int off, int len) throws IOException {
+            return file.read(b, off, len);
+        }
+
+        /** {@inheritDoc} */
+        @Override public synchronized void close() throws IOException {
+            file.close();
+        }
+
+        /** {@inheritDoc} */
+        @Override public synchronized int read(long pos, byte[] buf, int off, int len) throws IOException {
+            long pos0 = file.getFilePointer();
+
+            file.seek(pos);
+            int res = file.read(buf, off, len);
+
+            file.seek(pos0);
+
+            return res;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void readFully(long pos, byte[] buf, int off, int len) throws IOException {
+            if (read(pos, buf, off, len) != len)
+                throw new IOException("Failed to read the requested number of bytes (premature end of file).");
+        }
+
+        /** {@inheritDoc} */
+        @Override public void readFully(long pos, byte[] buf) throws IOException {
+            readFully(pos, buf, 0, buf.length);
+        }
+
+        /** {@inheritDoc} */
+        @Override public synchronized void seek(long pos) throws IOException {
+            file.seek(pos);
+        }
+
+        /** {@inheritDoc} */
+        @Override public synchronized long getPos() throws IOException {
+            return file.getFilePointer();
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean seekToNewSource(long targetPos) throws IOException {
+            return false;
+        }
+    }
+}
\ No newline at end of file
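
Note that InStream implements PositionedReadable by saving and restoring the file pointer, so positioned reads leave
the current stream position untouched. A hedged usage sketch (the file path is an assumption):

    FileSystem fs = new HadoopRawLocalFileSystem();

    fs.initialize(FsConstants.LOCAL_FS_URI, new Configuration());

    try (FSDataInputStream in = fs.open(new Path("/tmp/data.bin"), 4096)) {
        byte[] buf = new byte[16];

        in.readFully(128, buf); // Read 16 bytes at offset 128; getPos() still returns 0.
    }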

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfs.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfs.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfs.java
new file mode 100644
index 0000000..fe43596
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfs.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.igfs;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.igfs.IgfsBlockLocation;
+import org.apache.ignite.igfs.IgfsFile;
+import org.apache.ignite.igfs.IgfsPath;
+import org.apache.ignite.igfs.IgfsPathSummary;
+import org.apache.ignite.internal.processors.igfs.IgfsHandshakeResponse;
+import org.apache.ignite.internal.processors.igfs.IgfsStatus;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Facade for communication with grid.
+ */
+public interface HadoopIgfs {
+    /**
+     * Perform handshake.
+     *
+     * @param logDir Log directory.
+     * @return Handshake response.
+     * @throws IgniteCheckedException If failed.
+     */
+    public IgfsHandshakeResponse handshake(String logDir) throws IgniteCheckedException, IOException;
+
+    /**
+     * Close connection.
+     *
+     * @param force Force flag.
+     */
+    public void close(boolean force);
+
+    /**
+     * Command to retrieve file info for some IGFS path.
+     *
+     * @param path Path to get file info for.
+     * @return File info.
+     * @throws IgniteCheckedException If failed.
+     */
+    public IgfsFile info(IgfsPath path) throws IgniteCheckedException, IOException;
+
+    /**
+     * Command to update file properties.
+     *
+     * @param path IGFS path to update properties.
+     * @param props Properties to update.
+     * @return Updated file info.
+     * @throws IgniteCheckedException If failed.
+     */
+    public IgfsFile update(IgfsPath path, Map<String, String> props) throws IgniteCheckedException, IOException;
+
+    /**
+     * Sets last access time and last modification time for a file.
+     *
+     * @param path Path to update times.
+     * @param accessTime Last access time to set.
+     * @param modificationTime Last modification time to set.
+     * @return {@code True} if times were updated.
+     * @throws IgniteCheckedException If failed.
+     */
+    public Boolean setTimes(IgfsPath path, long accessTime, long modificationTime) throws IgniteCheckedException,
+        IOException;
+
+    /**
+     * Command to rename given path.
+     *
+     * @param src Source path.
+     * @param dest Destination path.
+     * @return {@code True} if the path was renamed.
+     * @throws IgniteCheckedException If failed.
+     */
+    public Boolean rename(IgfsPath src, IgfsPath dest) throws IgniteCheckedException, IOException;
+
+    /**
+     * Command to delete given path.
+     *
+     * @param path Path to delete.
+     * @param recursive {@code True} if deletion is recursive.
+     * @return {@code True} if the path was deleted.
+     * @throws IgniteCheckedException If failed.
+     */
+    public Boolean delete(IgfsPath path, boolean recursive) throws IgniteCheckedException, IOException;
+
+    /**
+     * Command to get affinity for given path, offset and length.
+     *
+     * @param path Path to get affinity for.
+     * @param start Start position (offset).
+     * @param len Data length.
+     * @return Collection of affinity block locations.
+     * @throws IgniteCheckedException If failed.
+     */
+    public Collection<IgfsBlockLocation> affinity(IgfsPath path, long start, long len) throws IgniteCheckedException,
+        IOException;
+
+    /**
+     * Gets path summary.
+     *
+     * @param path Path to get summary for.
+     * @return Path summary.
+     * @throws IgniteCheckedException If failed.
+     */
+    public IgfsPathSummary contentSummary(IgfsPath path) throws IgniteCheckedException, IOException;
+
+    /**
+     * Command to create directories.
+     *
+     * @param path Path to create.
+     * @param props Properties for created directories.
+     * @return {@code True} if directories were created.
+     * @throws IgniteCheckedException If failed.
+     */
+    public Boolean mkdirs(IgfsPath path, Map<String, String> props) throws IgniteCheckedException, IOException;
+
+    /**
+     * Command to get list of files in directory.
+     *
+     * @param path Path to list.
+     * @return Collection of files.
+     * @throws IgniteCheckedException If failed.
+     */
+    public Collection<IgfsFile> listFiles(IgfsPath path) throws IgniteCheckedException, IOException;
+
+    /**
+     * Command to get directory listing.
+     *
+     * @param path Path to list.
+     * @return Collection of paths.
+     * @throws IgniteCheckedException If failed.
+     */
+    public Collection<IgfsPath> listPaths(IgfsPath path) throws IgniteCheckedException, IOException;
+
+    /**
+     * Performs status request.
+     *
+     * @return Status response.
+     * @throws IgniteCheckedException If failed.
+     */
+    public IgfsStatus fsStatus() throws IgniteCheckedException, IOException;
+
+    /**
+     * Command to open file for reading.
+     *
+     * @param path File path to open.
+     * @return Stream delegate.
+     * @throws IgniteCheckedException If failed.
+     */
+    public HadoopIgfsStreamDelegate open(IgfsPath path) throws IgniteCheckedException, IOException;
+
+    /**
+     * Command to open file for reading.
+     *
+     * @param path File path to open.
+     * @param seqReadsBeforePrefetch Number of sequential reads before prefetch is triggered.
+     * @return Stream delegate.
+     * @throws IgniteCheckedException If failed.
+     */
+    public HadoopIgfsStreamDelegate open(IgfsPath path, int seqReadsBeforePrefetch) throws IgniteCheckedException,
+        IOException;
+
+    /**
+     * Command to create file and open it for output.
+     *
+     * @param path Path to file.
+     * @param overwrite If {@code true} then old file contents will be lost.
+     * @param colocate If {@code true} and called on data node, file will be written on that node.
+     * @param replication Replication factor.
+     * @param blockSize Block size.
+     * @param props File properties for creation.
+     * @return Stream descriptor.
+     * @throws IgniteCheckedException If failed.
+     */
+    public HadoopIgfsStreamDelegate create(IgfsPath path, boolean overwrite, boolean colocate,
+        int replication, long blockSize, @Nullable Map<String, String> props) throws IgniteCheckedException, IOException;
+
+    /**
+     * Open file for output appending data to the end of a file.
+     *
+     * @param path Path to file.
+     * @param create If {@code true}, the file will be created if it does not exist.
+     * @param props File properties.
+     * @return Stream descriptor.
+     * @throws IgniteCheckedException If failed.
+     */
+    public HadoopIgfsStreamDelegate append(IgfsPath path, boolean create,
+        @Nullable Map<String, String> props) throws IgniteCheckedException, IOException;
+}
\ No newline at end of file
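
A rough sketch of how this facade is meant to be driven; obtainDelegate() is a placeholder for however the concrete
implementation (in-process or IPC) is obtained, and the paths are assumptions:

    HadoopIgfs igfs = obtainDelegate(); // Placeholder, not a real API.

    igfs.handshake("/var/log/igfs-sampling");

    IgfsPath path = new IgfsPath("/data/input.txt");

    IgfsFile info = igfs.info(path);

    if (info != null && !info.isDirectory()) {
        HadoopIgfsStreamDelegate in = igfs.open(path);

        // ... read through the owning implementation, then close the stream.
    }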

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsCommunicationException.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsCommunicationException.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsCommunicationException.java
new file mode 100644
index 0000000..d610091
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsCommunicationException.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.igfs;
+
+import org.apache.ignite.IgniteCheckedException;
+
+/**
+ * Communication exception indicating a problem between file system and IGFS instance.
+ */
+public class HadoopIgfsCommunicationException extends IgniteCheckedException {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /**
+     * Creates a new exception with the given throwable as the nested cause and
+     * the source of the error message.
+     *
+     * @param cause Non-null throwable cause.
+     */
+    public HadoopIgfsCommunicationException(Exception cause) {
+        super(cause);
+    }
+
+    /**
+     * Creates a new exception with the given error message.
+     *
+     * @param msg Error message.
+     */
+    public HadoopIgfsCommunicationException(String msg) {
+        super(msg);
+    }
+
+    /**
+     * Creates a new exception with the given error message and nested cause exception.
+     *
+     * @param msg Error message.
+     * @param cause Cause.
+     */
+    public HadoopIgfsCommunicationException(String msg, Exception cause) {
+        super(msg, cause);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsEx.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsEx.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsEx.java
new file mode 100644
index 0000000..014e2a1
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsEx.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.igfs;
+
+import java.io.IOException;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Extended IGFS server interface.
+ */
+public interface HadoopIgfsEx extends HadoopIgfs {
+    /**
+     * Adds an event listener that will be invoked when the connection with the server is lost or a remote error
+     * has occurred. If the connection is already closed, the callback will be invoked synchronously inside
+     * this method.
+     *
+     * @param delegate Stream delegate.
+     * @param lsnr Event listener.
+     */
+    public void addEventListener(HadoopIgfsStreamDelegate delegate, HadoopIgfsStreamEventListener lsnr);
+
+    /**
+     * Removes a previously added event listener.
+     *
+     * @param delegate Stream delegate.
+     */
+    public void removeEventListener(HadoopIgfsStreamDelegate delegate);
+
+    /**
+     * Asynchronously reads specified amount of bytes from opened input stream.
+     *
+     * @param delegate Stream delegate.
+     * @param pos Position to read from.
+     * @param len Data length to read.
+     * @param outBuf Optional output buffer. If the buffer length is less than {@code len}, all remaining
+     *     bytes will be read into a newly allocated buffer of length {@code len - outBuf.length} and that
+     *     buffer will be the result of the read future.
+     * @param outOff Output offset.
+     * @param outLen Output length.
+     * @return Read data.
+     */
+    public IgniteInternalFuture<byte[]> readData(HadoopIgfsStreamDelegate delegate, long pos, int len,
+        @Nullable final byte[] outBuf, final int outOff, final int outLen);
+
+    /**
+     * Writes data to the stream referenced by the given delegate. This method does not return any future since
+     * no response to the write request is sent.
+     *
+     * @param delegate Stream delegate.
+     * @param data Data to write.
+     * @param off Offset.
+     * @param len Length.
+     * @throws IOException If failed.
+     */
+    public void writeData(HadoopIgfsStreamDelegate delegate, byte[] data, int off, int len) throws IOException;
+
+    /**
+     * Close server stream.
+     *
+     * @param delegate Stream delegate.
+     * @throws IOException If failed.
+     */
+    public void closeStream(HadoopIgfsStreamDelegate delegate) throws IOException;
+
+    /**
+     * Flush output stream.
+     *
+     * @param delegate Stream delegate.
+     * @throws IOException If failed.
+     */
+    public void flush(HadoopIgfsStreamDelegate delegate) throws IOException;
+
+    /**
+     * Gets the user this IGFS instance works on behalf of.
+     *
+     * @return The user name.
+     */
+    public String user();
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsFuture.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsFuture.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsFuture.java
new file mode 100644
index 0000000..5ff1b2e
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsFuture.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.igfs;
+
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * IGFS client future that holds response parse closure.
+ */
+public class HadoopIgfsFuture<T> extends GridFutureAdapter<T> {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** Output buffer. */
+    private byte[] outBuf;
+
+    /** Output offset. */
+    private int outOff;
+
+    /** Output length. */
+    private int outLen;
+
+    /** Read future flag. */
+    private boolean read;
+
+    /**
+     * @return Output buffer.
+     */
+    public byte[] outputBuffer() {
+        return outBuf;
+    }
+
+    /**
+     * @param outBuf Output buffer.
+     */
+    public void outputBuffer(@Nullable byte[] outBuf) {
+        this.outBuf = outBuf;
+    }
+
+    /**
+     * @return Offset in output buffer to write from.
+     */
+    public int outputOffset() {
+        return outOff;
+    }
+
+    /**
+     * @param outOff Offset in output buffer to write from.
+     */
+    public void outputOffset(int outOff) {
+        this.outOff = outOff;
+    }
+
+    /**
+     * @return Length to write to output buffer.
+     */
+    public int outputLength() {
+        return outLen;
+    }
+
+    /**
+     * @param outLen Length to write to output buffer.
+     */
+    public void outputLength(int outLen) {
+        this.outLen = outLen;
+    }
+
+    /**
+     * @param read {@code True} if this is a read future.
+     */
+    public void read(boolean read) {
+        this.read = read;
+    }
+
+    /**
+     * @return {@code True} if this is a read future.
+     */
+    public boolean read() {
+        return read;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInProc.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInProc.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInProc.java
new file mode 100644
index 0000000..3220538
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInProc.java
@@ -0,0 +1,510 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.igfs;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import org.apache.commons.logging.Log;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.igfs.IgfsBlockLocation;
+import org.apache.ignite.igfs.IgfsFile;
+import org.apache.ignite.igfs.IgfsInputStream;
+import org.apache.ignite.igfs.IgfsOutputStream;
+import org.apache.ignite.igfs.IgfsPath;
+import org.apache.ignite.igfs.IgfsPathSummary;
+import org.apache.ignite.igfs.IgfsUserContext;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.igfs.IgfsEx;
+import org.apache.ignite.internal.processors.igfs.IgfsHandshakeResponse;
+import org.apache.ignite.internal.processors.igfs.IgfsStatus;
+import org.apache.ignite.internal.processors.igfs.IgfsUtils;
+import org.apache.ignite.internal.util.future.GridFinishedFuture;
+import org.apache.ignite.lang.IgniteOutClosure;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Communication with a grid running in the same process.
+ */
+public class HadoopIgfsInProc implements HadoopIgfsEx {
+    /** Target IGFS. */
+    private final IgfsEx igfs;
+
+    /** Buffer size. */
+    private final int bufSize;
+
+    /** Event listeners. */
+    private final Map<HadoopIgfsStreamDelegate, HadoopIgfsStreamEventListener> lsnrs =
+        new ConcurrentHashMap<>();
+
+    /** Logger. */
+    private final Log log;
+
+    /** The user this IGFS works on behalf of. */
+    private final String user;
+
+    /**
+     * Constructor.
+     *
+     * @param igfs Target IGFS.
+     * @param log Log.
+     * @param userName User name on whose behalf IGFS operations are performed.
+     * @throws IgniteCheckedException If failed.
+     */
+    public HadoopIgfsInProc(IgfsEx igfs, Log log, String userName) throws IgniteCheckedException {
+        this.user = IgfsUtils.fixUserName(userName);
+
+        this.igfs = igfs;
+
+        this.log = log;
+
+        bufSize = igfs.configuration().getBlockSize() * 2;
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgfsHandshakeResponse handshake(final String logDir) {
+        return IgfsUserContext.doAs(user, new IgniteOutClosure<IgfsHandshakeResponse>() {
+            @Override public IgfsHandshakeResponse apply() {
+                igfs.clientLogDirectory(logDir);
+
+                return new IgfsHandshakeResponse(igfs.name(), igfs.proxyPaths(), igfs.groupBlockSize(),
+                    igfs.globalSampling());
+            }
+        });
+    }
+
+    /** {@inheritDoc} */
+    @Override public void close(boolean force) {
+        // Perform cleanup.
+        for (HadoopIgfsStreamEventListener lsnr : lsnrs.values()) {
+            try {
+                lsnr.onClose();
+            }
+            catch (IgniteCheckedException e) {
+                if (log.isDebugEnabled())
+                    log.debug("Failed to notify stream event listener", e);
+            }
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgfsFile info(final IgfsPath path) throws IgniteCheckedException {
+        try {
+            return IgfsUserContext.doAs(user, new IgniteOutClosure<IgfsFile>() {
+                @Override public IgfsFile apply() {
+                    return igfs.info(path);
+                }
+            });
+        }
+        catch (IgniteException e) {
+            throw new IgniteCheckedException(e);
+        }
+        catch (IllegalStateException e) {
+            throw new HadoopIgfsCommunicationException("Failed to get file info because Grid is stopping: " + path);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgfsFile update(final IgfsPath path, final Map<String, String> props)
+        throws IgniteCheckedException {
+        try {
+            return IgfsUserContext.doAs(user, new IgniteOutClosure<IgfsFile>() {
+                @Override public IgfsFile apply() {
+                    return igfs.update(path, props);
+                }
+            });
+        }
+        catch (IgniteException e) {
+            throw new IgniteCheckedException(e);
+        }
+        catch (IllegalStateException e) {
+            throw new HadoopIgfsCommunicationException("Failed to update file because Grid is stopping: " + path);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public Boolean setTimes(final IgfsPath path, final long accessTime, final long modificationTime)
+        throws IgniteCheckedException {
+        try {
+            IgfsUserContext.doAs(user, new IgniteOutClosure<Void>() {
+                @Override public Void apply() {
+                    igfs.setTimes(path, accessTime, modificationTime);
+
+                    return null;
+                }
+            });
+
+            return true;
+        }
+        catch (IgniteException e) {
+            throw new IgniteCheckedException(e);
+        }
+        catch (IllegalStateException e) {
+            throw new HadoopIgfsCommunicationException("Failed to set path times because Grid is stopping: " +
+                path);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public Boolean rename(final IgfsPath src, final IgfsPath dest) throws IgniteCheckedException {
+        try {
+            IgfsUserContext.doAs(user, new IgniteOutClosure<Void>() {
+                @Override public Void apply() {
+                    igfs.rename(src, dest);
+
+                    return null;
+                }
+            });
+
+            return true;
+        }
+        catch (IgniteException e) {
+            throw new IgniteCheckedException(e);
+        }
+        catch (IllegalStateException e) {
+            throw new HadoopIgfsCommunicationException("Failed to rename path because Grid is stopping: " + src);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public Boolean delete(final IgfsPath path, final boolean recursive) throws IgniteCheckedException {
+        try {
+            return IgfsUserContext.doAs(user, new IgniteOutClosure<Boolean>() {
+                @Override public Boolean apply() {
+                    return igfs.delete(path, recursive);
+                }
+            });
+        }
+        catch (IgniteException e) {
+            throw new IgniteCheckedException(e);
+        }
+        catch (IllegalStateException e) {
+            throw new HadoopIgfsCommunicationException("Failed to delete path because Grid is stopping: " + path);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgfsStatus fsStatus() throws IgniteCheckedException {
+        try {
+            return IgfsUserContext.doAs(user, new Callable<IgfsStatus>() {
+                @Override public IgfsStatus call() throws IgniteCheckedException {
+                    return igfs.globalSpace();
+                }
+            });
+        }
+        catch (IllegalStateException e) {
+            throw new HadoopIgfsCommunicationException("Failed to get file system status because Grid is " +
+                "stopping.");
+        }
+        catch (IgniteCheckedException | RuntimeException | Error e) {
+            throw e;
+        }
+        catch (Exception e) {
+            throw new AssertionError("Should never be reached.", e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public Collection<IgfsPath> listPaths(final IgfsPath path) throws IgniteCheckedException {
+        try {
+            return IgfsUserContext.doAs(user, new IgniteOutClosure<Collection<IgfsPath>>() {
+                @Override public Collection<IgfsPath> apply() {
+                    return igfs.listPaths(path);
+                }
+            });
+        }
+        catch (IgniteException e) {
+            throw new IgniteCheckedException(e);
+        }
+        catch (IllegalStateException e) {
+            throw new HadoopIgfsCommunicationException("Failed to list paths because Grid is stopping: " + path);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public Collection<IgfsFile> listFiles(final IgfsPath path) throws IgniteCheckedException {
+        try {
+            return IgfsUserContext.doAs(user, new IgniteOutClosure<Collection<IgfsFile>>() {
+                @Override public Collection<IgfsFile> apply() {
+                    return igfs.listFiles(path);
+                }
+            });
+        }
+        catch (IgniteException e) {
+            throw new IgniteCheckedException(e);
+        }
+        catch (IllegalStateException e) {
+            throw new HadoopIgfsCommunicationException("Failed to list files because Grid is stopping: " + path);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public Boolean mkdirs(final IgfsPath path, final Map<String, String> props)
+        throws IgniteCheckedException {
+        try {
+            IgfsUserContext.doAs(user, new IgniteOutClosure<Void>() {
+                @Override public Void apply() {
+                    igfs.mkdirs(path, props);
+
+                    return null;
+                }
+            });
+
+            return true;
+        }
+        catch (IgniteException e) {
+            throw new IgniteCheckedException(e);
+        }
+        catch (IllegalStateException e) {
+            throw new HadoopIgfsCommunicationException("Failed to create directory because Grid is stopping: " +
+                path);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgfsPathSummary contentSummary(final IgfsPath path) throws IgniteCheckedException {
+        try {
+            return IgfsUserContext.doAs(user, new IgniteOutClosure<IgfsPathSummary>() {
+                @Override public IgfsPathSummary apply() {
+                    return igfs.summary(path);
+                }
+            });
+        }
+        catch (IgniteException e) {
+            throw new IgniteCheckedException(e);
+        }
+        catch (IllegalStateException e) {
+            throw new HadoopIgfsCommunicationException("Failed to get content summary because Grid is stopping: " +
+                path);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public Collection<IgfsBlockLocation> affinity(final IgfsPath path, final long start, final long len)
+        throws IgniteCheckedException {
+        try {
+            return IgfsUserContext.doAs(user, new IgniteOutClosure<Collection<IgfsBlockLocation>>() {
+                @Override public Collection<IgfsBlockLocation> apply() {
+                    return igfs.affinity(path, start, len);
+                }
+            });
+        }
+        catch (IgniteException e) {
+            throw new IgniteCheckedException(e);
+        }
+        catch (IllegalStateException e) {
+            throw new HadoopIgfsCommunicationException("Failed to get affinity because Grid is stopping: " + path);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopIgfsStreamDelegate open(final IgfsPath path) throws IgniteCheckedException {
+        try {
+            return IgfsUserContext.doAs(user, new IgniteOutClosure<HadoopIgfsStreamDelegate>() {
+                @Override public HadoopIgfsStreamDelegate apply() {
+                    IgfsInputStream stream = igfs.open(path, bufSize);
+
+                    return new HadoopIgfsStreamDelegate(HadoopIgfsInProc.this, stream, stream.length());
+                }
+            });
+        }
+        catch (IgniteException e) {
+            throw new IgniteCheckedException(e);
+        }
+        catch (IllegalStateException e) {
+            throw new HadoopIgfsCommunicationException("Failed to open file because Grid is stopping: " + path);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopIgfsStreamDelegate open(final IgfsPath path, final int seqReadsBeforePrefetch)
+        throws IgniteCheckedException {
+        try {
+            return IgfsUserContext.doAs(user, new IgniteOutClosure<HadoopIgfsStreamDelegate>() {
+                @Override public HadoopIgfsStreamDelegate apply() {
+                    IgfsInputStream stream = igfs.open(path, bufSize, seqReadsBeforePrefetch);
+
+                    return new HadoopIgfsStreamDelegate(HadoopIgfsInProc.this, stream, stream.length());
+                }
+            });
+        }
+        catch (IgniteException e) {
+            throw new IgniteCheckedException(e);
+        }
+        catch (IllegalStateException e) {
+            throw new HadoopIgfsCommunicationException("Failed to open file because Grid is stopping: " + path);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopIgfsStreamDelegate create(final IgfsPath path, final boolean overwrite, final boolean colocate,
+        final int replication, final long blockSize, final @Nullable Map<String, String> props) throws IgniteCheckedException {
+        try {
+            return IgfsUserContext.doAs(user, new IgniteOutClosure<HadoopIgfsStreamDelegate>() {
+                @Override public HadoopIgfsStreamDelegate apply() {
+                    IgfsOutputStream stream = igfs.create(path, bufSize, overwrite,
+                        colocate ? igfs.nextAffinityKey() : null, replication, blockSize, props);
+
+                    return new HadoopIgfsStreamDelegate(HadoopIgfsInProc.this, stream);
+                }
+            });
+        }
+        catch (IgniteException e) {
+            throw new IgniteCheckedException(e);
+        }
+        catch (IllegalStateException e) {
+            throw new HadoopIgfsCommunicationException("Failed to create file because Grid is stopping: " + path);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopIgfsStreamDelegate append(final IgfsPath path, final boolean create,
+        final @Nullable Map<String, String> props) throws IgniteCheckedException {
+        try {
+            return IgfsUserContext.doAs(user, new IgniteOutClosure<HadoopIgfsStreamDelegate>() {
+                @Override public HadoopIgfsStreamDelegate apply() {
+                    IgfsOutputStream stream = igfs.append(path, bufSize, create, props);
+
+                    return new HadoopIgfsStreamDelegate(HadoopIgfsInProc.this, stream);
+                }
+            });
+        }
+        catch (IgniteException e) {
+            throw new IgniteCheckedException(e);
+        }
+        catch (IllegalStateException e) {
+            throw new HadoopIgfsCommunicationException("Failed to append file because Grid is stopping: " + path);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteInternalFuture<byte[]> readData(HadoopIgfsStreamDelegate delegate, long pos, int len,
+        @Nullable byte[] outBuf, int outOff, int outLen) {
+        IgfsInputStream stream = delegate.target();
+
+        try {
+            byte[] res = null;
+
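+            // If the caller supplied an output buffer, fill its tail first; any bytes that
+            // do not fit are read into a separate remainder array returned via the future.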
+            if (outBuf != null) {
+                int outTailLen = outBuf.length - outOff;
+
+                if (len <= outTailLen)
+                    stream.readFully(pos, outBuf, outOff, len);
+                else {
+                    stream.readFully(pos, outBuf, outOff, outTailLen);
+
+                    int remainderLen = len - outTailLen;
+
+                    res = new byte[remainderLen];
+
+                    stream.readFully(pos, res, 0, remainderLen);
+                }
+            }
+            else {
+                res = new byte[len];
+
+                stream.readFully(pos, res, 0, len);
+            }
+
+            return new GridFinishedFuture<>(res);
+        }
+        catch (IllegalStateException | IOException e) {
+            HadoopIgfsStreamEventListener lsnr = lsnrs.get(delegate);
+
+            if (lsnr != null)
+                lsnr.onError(e.getMessage());
+
+            return new GridFinishedFuture<>(e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeData(HadoopIgfsStreamDelegate delegate, byte[] data, int off, int len)
+        throws IOException {
+        try {
+            IgfsOutputStream stream = delegate.target();
+
+            stream.write(data, off, len);
+        }
+        catch (IllegalStateException | IOException e) {
+            HadoopIgfsStreamEventListener lsnr = lsnrs.get(delegate);
+
+            if (lsnr != null)
+                lsnr.onError(e.getMessage());
+
+            if (e instanceof IllegalStateException)
+                throw new IOException("Failed to write data to IGFS stream because Grid is stopping.", e);
+            else
+                throw e;
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void flush(HadoopIgfsStreamDelegate delegate) throws IOException {
+        try {
+            IgfsOutputStream stream = delegate.target();
+
+            stream.flush();
+        }
+        catch (IllegalStateException | IOException e) {
+            HadoopIgfsStreamEventListener lsnr = lsnrs.get(delegate);
+
+            if (lsnr != null)
+                lsnr.onError(e.getMessage());
+
+            if (e instanceof IllegalStateException)
+                throw new IOException("Failed to flush data to IGFS stream because Grid is stopping.", e);
+            else
+                throw e;
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void closeStream(HadoopIgfsStreamDelegate desc) throws IOException {
+        Closeable closeable = desc.target();
+
+        try {
+            closeable.close();
+        }
+        catch (IllegalStateException e) {
+            throw new IOException("Failed to close IGFS stream because Grid is stopping.", e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void addEventListener(HadoopIgfsStreamDelegate delegate,
+        HadoopIgfsStreamEventListener lsnr) {
+        HadoopIgfsStreamEventListener lsnr0 = lsnrs.put(delegate, lsnr);
+
+        assert lsnr0 == null || lsnr0 == lsnr;
+
+        if (log.isDebugEnabled())
+            log.debug("Added stream event listener [delegate=" + delegate + ']');
+    }
+
+    /** {@inheritDoc} */
+    @Override public void removeEventListener(HadoopIgfsStreamDelegate delegate) {
+        HadoopIgfsStreamEventListener lsnr0 = lsnrs.remove(delegate);
+
+        if (lsnr0 != null && log.isDebugEnabled())
+            log.debug("Removed stream event listener [delegate=" + delegate + ']');
+    }
+
+    /** {@inheritDoc} */
+    @Override public String user() {
+        return user;
+    }
+}
\ No newline at end of file
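
Every metadata call above repeats one template: run the operation under the file system user via
IgfsUserContext.doAs(...), wrap IgniteException into IgniteCheckedException, and translate
IllegalStateException (thrown once the grid is stopping) into HadoopIgfsCommunicationException.
A stripped-down sketch of that template (the helper itself is hypothetical; it uses only types
already imported by the file):

    /** Hypothetical helper showing the wrapping applied by each metadata method. */
    private <T> T doAsUser(IgniteOutClosure<T> op, IgfsPath path) throws IgniteCheckedException {
        try {
            return IgfsUserContext.doAs(user, op);
        }
        catch (IgniteException e) {
            throw new IgniteCheckedException(e);
        }
        catch (IllegalStateException e) {
            throw new HadoopIgfsCommunicationException("Operation failed because Grid is stopping: " + path);
        }
    }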

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInputStream.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInputStream.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInputStream.java
new file mode 100644
index 0000000..46b46d7
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInputStream.java
@@ -0,0 +1,629 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.igfs;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.Seekable;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.igfs.common.IgfsLogger;
+import org.apache.ignite.internal.util.typedef.internal.A;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ * IGFS input stream wrapper for hadoop interfaces.
+ */
+@SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
+public final class HadoopIgfsInputStream extends InputStream implements Seekable, PositionedReadable,
+    HadoopIgfsStreamEventListener {
+    /** Minimum buffer size. */
+    private static final int MIN_BUF_SIZE = 4 * 1024;
+
+    /** Server stream delegate. */
+    private HadoopIgfsStreamDelegate delegate;
+
+    /** Stream ID used by logger. */
+    private long logStreamId;
+
+    /** Stream position. */
+    private long pos;
+
+    /** Stream read limit. */
+    private long limit;
+
+    /** Mark position. */
+    private long markPos = -1;
+
+    /** Prefetch buffer. */
+    private DoubleFetchBuffer buf = new DoubleFetchBuffer();
+
+    /** Buffer half size for double-buffering. */
+    private int bufHalfSize;
+
+    /** Closed flag. */
+    private volatile boolean closed;
+
+    /** Flag set if stream was closed due to connection breakage. */
+    private boolean connBroken;
+
+    /** Logger. */
+    private Log log;
+
+    /** Client logger. */
+    private IgfsLogger clientLog;
+
+    /** Read time. */
+    private long readTime;
+
+    /** User time. */
+    private long userTime;
+
+    /** Last timestamp. */
+    private long lastTs;
+
+    /** Amount of read bytes. */
+    private long total;
+
+    /**
+     * Creates input stream.
+     *
+     * @param delegate Server stream delegate.
+     * @param limit Read limit.
+     * @param bufSize Buffer size.
+     * @param log Log.
+     * @param clientLog Client logger.
+     * @param logStreamId Stream ID used by the client logger.
+     */
+    public HadoopIgfsInputStream(HadoopIgfsStreamDelegate delegate, long limit, int bufSize, Log log,
+        IgfsLogger clientLog, long logStreamId) {
+        assert limit >= 0;
+
+        this.delegate = delegate;
+        this.limit = limit;
+        this.log = log;
+        this.clientLog = clientLog;
+        this.logStreamId = logStreamId;
+
+        bufHalfSize = Math.max(bufSize, MIN_BUF_SIZE);
+
+        lastTs = System.nanoTime();
+
+        delegate.hadoop().addEventListener(delegate, this);
+    }
+
+    /**
+     * Read start.
+     */
+    private void readStart() {
+        long now = System.nanoTime();
+
+        userTime += now - lastTs;
+
+        lastTs = now;
+    }
+
+    /**
+     * Read end.
+     */
+    private void readEnd() {
+        long now = System.nanoTime();
+
+        readTime += now - lastTs;
+
+        lastTs = now;
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized int read() throws IOException {
+        checkClosed();
+
+        readStart();
+
+        try {
+            if (eof())
+                return -1;
+
+            buf.refreshAhead(pos);
+
+            int res = buf.atPosition(pos);
+
+            pos++;
+            total++;
+
+            buf.refreshAhead(pos);
+
+            return res;
+        }
+        catch (IgniteCheckedException e) {
+            throw HadoopIgfsUtils.cast(e);
+        }
+        finally {
+            readEnd();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized int read(@NotNull byte[] b, int off, int len) throws IOException {
+        checkClosed();
+
+        if (eof())
+            return -1;
+
+        readStart();
+
+        try {
+            long remaining = limit - pos;
+
+            int read = buf.flatten(b, pos, off, len);
+
+            pos += read;
+            total += read;
+            remaining -= read;
+
+            if (remaining > 0 && read != len) {
+                int readAmt = (int)Math.min(remaining, len - read);
+
+                delegate.hadoop().readData(delegate, pos, readAmt, b, off + read, len - read).get();
+
+                read += readAmt;
+                pos += readAmt;
+                total += readAmt;
+            }
+
+            buf.refreshAhead(pos);
+
+            return read;
+        }
+        catch (IgniteCheckedException e) {
+            throw HadoopIgfsUtils.cast(e);
+        }
+        finally {
+            readEnd();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized long skip(long n) throws IOException {
+        checkClosed();
+
+        if (clientLog.isLogEnabled())
+            clientLog.logSkip(logStreamId, n);
+
+        long oldPos = pos;
+
+        if (pos + n <= limit)
+            pos += n;
+        else
+            pos = limit;
+
+        buf.refreshAhead(pos);
+
+        return pos - oldPos;
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized int available() throws IOException {
+        checkClosed();
+
+        int available = buf.available(pos);
+
+        assert available >= 0;
+
+        return available;
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized void close() throws IOException {
+        if (!closed) {
+            readStart();
+
+            if (log.isDebugEnabled())
+                log.debug("Closing input stream: " + delegate);
+
+            delegate.hadoop().closeStream(delegate);
+
+            readEnd();
+
+            if (clientLog.isLogEnabled())
+                clientLog.logCloseIn(logStreamId, userTime, readTime, total);
+
+            markClosed(false);
+
+            if (log.isDebugEnabled())
+                log.debug("Closed stream [delegate=" + delegate + ", readTime=" + readTime +
+                    ", userTime=" + userTime + ']');
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized void mark(int readLimit) {
+        markPos = pos;
+
+        if (clientLog.isLogEnabled())
+            clientLog.logMark(logStreamId, readLimit);
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized void reset() throws IOException {
+        checkClosed();
+
+        if (clientLog.isLogEnabled())
+            clientLog.logReset(logStreamId);
+
+        if (markPos == -1)
+            throw new IOException("Stream was not marked.");
+
+        pos = markPos;
+
+        buf.refreshAhead(pos);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean markSupported() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized int read(long position, byte[] buf, int off, int len) throws IOException {
+        long remaining = limit - position;
+
+        int read = (int)Math.min(len, remaining);
+
+        // Return -1 at EOF.
+        if (read == 0)
+            return -1;
+
+        readFully(position, buf, off, read);
+
+        return read;
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized void readFully(long position, byte[] buf, int off, int len) throws IOException {
+        long remaining = limit - position;
+
+        checkClosed();
+
+        if (len > remaining)
+            throw new EOFException("End of stream reached before data was fully read.");
+
+        readStart();
+
+        try {
+            int read = this.buf.flatten(buf, position, off, len);
+
+            total += read;
+
+            if (read != len) {
+                int readAmt = len - read;
+
+                delegate.hadoop().readData(delegate, position + read, readAmt, buf, off + read, readAmt).get();
+
+                total += readAmt;
+            }
+
+            if (clientLog.isLogEnabled())
+                clientLog.logRandomRead(logStreamId, position, len);
+        }
+        catch (IgniteCheckedException e) {
+            throw HadoopIgfsUtils.cast(e);
+        }
+        finally {
+            readEnd();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readFully(long position, byte[] buf) throws IOException {
+        readFully(position, buf, 0, buf.length);
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized void seek(long pos) throws IOException {
+        A.ensure(pos >= 0, "position must be non-negative");
+
+        checkClosed();
+
+        if (clientLog.isLogEnabled())
+            clientLog.logSeek(logStreamId, pos);
+
+        if (pos > limit)
+            pos = limit;
+
+        if (log.isDebugEnabled())
+            log.debug("Seek to position [delegate=" + delegate + ", pos=" + pos + ", oldPos=" + this.pos + ']');
+
+        this.pos = pos;
+
+        buf.refreshAhead(pos);
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized long getPos() {
+        return pos;
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized boolean seekToNewSource(long targetPos) {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onClose() {
+        markClosed(true);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onError(String errMsg) {
+        // No-op.
+    }
+
+    /**
+     * Marks stream as closed.
+     *
+     * @param connBroken {@code True} if connection with server was lost.
+     */
+    private void markClosed(boolean connBroken) {
+        // It is OK to have a race here.
+        if (!closed) {
+            closed = true;
+
+            this.connBroken = connBroken;
+
+            delegate.hadoop().removeEventListener(delegate);
+        }
+    }
+
+    /**
+     * @throws IOException If check failed.
+     */
+    private void checkClosed() throws IOException {
+        if (closed) {
+            if (connBroken)
+                throw new IOException("Server connection was lost.");
+            else
+                throw new IOException("Stream is closed.");
+        }
+    }
+
+    /**
+     * @return {@code True} if end of stream reached.
+     */
+    private boolean eof() {
+        return limit == pos;
+    }
+
+    /**
+     * Single part of the asynchronous prefetch buffer: one cached chunk and its read future.
+     */
+    private static class FetchBufferPart {
+        /** Read future. */
+        private IgniteInternalFuture<byte[]> readFut;
+
+        /** Position of cached chunk in file. */
+        private long pos;
+
+        /** Prefetch length. Stored separately because the read future result might not be available yet. */
+        private int len;
+
+        /**
+         * Creates fetch buffer part.
+         *
+         * @param readFut Read future for this buffer.
+         * @param pos Read position.
+         * @param len Chunk length.
+         */
+        private FetchBufferPart(IgniteInternalFuture<byte[]> readFut, long pos, int len) {
+            this.readFut = readFut;
+            this.pos = pos;
+            this.len = len;
+        }
+
+        /**
+         * Copies cached data if specified position matches cached region.
+         *
+         * @param dst Destination buffer.
+         * @param pos Read position in file.
+         * @param dstOff Offset in destination buffer from which start writing.
+         * @param len Maximum number of bytes to copy.
+         * @return Number of bytes copied.
+         * @throws IgniteCheckedException If read future failed.
+         */
+        public int flatten(byte[] dst, long pos, int dstOff, int len) throws IgniteCheckedException {
+            // If read start position is within cached boundaries.
+            if (contains(pos)) {
+                byte[] data = readFut.get();
+
+                int srcPos = (int)(pos - this.pos);
+                int cpLen = Math.min(len, data.length - srcPos);
+
+                U.arrayCopy(data, srcPos, dst, dstOff, cpLen);
+
+                return cpLen;
+            }
+
+            return 0;
+        }
+
+        /**
+         * @return {@code True} if data is ready to be read.
+         */
+        public boolean ready() {
+            return readFut.isDone();
+        }
+
+        /**
+         * Checks if current buffer part contains given position.
+         *
+         * @param pos Position to check.
+         * @return {@code True} if position matches buffer region.
+         */
+        public boolean contains(long pos) {
+            return this.pos <= pos && this.pos + len > pos;
+        }
+    }
+
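+    /**
+     * Double buffer that prefetches the next chunk asynchronously while the current one is being consumed.
+     */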
+    private class DoubleFetchBuffer {
+        /**  */
+        private FetchBufferPart first;
+
+        /** */
+        private FetchBufferPart second;
+
+        /**
+         * Copies fetched data from both buffers to destination array if cached region matched read position.
+         *
+         * @param dst Destination buffer.
+         * @param pos Read position in file.
+         * @param dstOff Destination buffer offset.
+         * @param len Maximum number of bytes to copy.
+         * @return Number of bytes copied.
+         * @throws IgniteCheckedException If any read operation failed.
+         */
+        public int flatten(byte[] dst, long pos, int dstOff, int len) throws IgniteCheckedException {
+            assert dstOff >= 0;
+            assert dstOff + len <= dst.length : "Invalid indices [dst.length=" + dst.length + ", dstOff=" + dstOff +
+                ", len=" + len + ']';
+
+            int bytesCopied = 0;
+
+            if (first != null) {
+                bytesCopied += first.flatten(dst, pos, dstOff, len);
+
+                if (bytesCopied != len && second != null) {
+                    assert second.pos == first.pos + first.len;
+
+                    bytesCopied += second.flatten(dst, pos + bytesCopied, dstOff + bytesCopied, len - bytesCopied);
+                }
+            }
+
+            return bytesCopied;
+        }
+
+        /**
+         * Gets byte at specified position in buffer.
+         *
+         * @param pos Stream position.
+         * @return Read byte.
+         * @throws IgniteCheckedException If read failed.
+         */
+        public int atPosition(long pos) throws IgniteCheckedException {
+            // Should not reach here if stream contains no data.
+            assert first != null;
+
+            if (first.contains(pos)) {
+                byte[] bytes = first.readFut.get();
+
+                return bytes[((int)(pos - first.pos))] & 0xFF;
+            }
+            else {
+                assert second != null;
+                assert second.contains(pos);
+
+                byte[] bytes = second.readFut.get();
+
+                return bytes[((int)(pos - second.pos))] & 0xFF;
+            }
+        }
+
+        /**
+         * Starts asynchronous buffer refresh if needed, depending on current position.
+         *
+         * @param pos Current stream position.
+         */
+        public void refreshAhead(long pos) {
+            if (fullPrefetch(pos)) {
+                first = fetch(pos, bufHalfSize);
+                second = fetch(pos + bufHalfSize, bufHalfSize);
+            }
+            else if (needFlip(pos)) {
+                first = second;
+
+                second = fetch(first.pos + first.len, bufHalfSize);
+            }
+        }
+
+        /**
+         * @param pos Position from which read is expected.
+         * @return Number of bytes available to be read without blocking.
+         */
+        public int available(long pos) {
+            int available = 0;
+
+            if (first != null) {
+                if (first.contains(pos)) {
+                    if (first.ready()) {
+                        available += (pos - first.pos);
+
+                        if (second != null && second.ready())
+                            available += second.len;
+                    }
+                }
+                else {
+                    if (second != null && second.contains(pos) && second.ready())
+                        available += (pos - second.pos);
+                }
+            }
+
+            return available;
+        }
+
+        /**
+         * Checks if position shifted enough to forget previous buffer.
+         *
+         * @param pos Current position.
+         * @return {@code True} if need flip buffers.
+         */
+        private boolean needFlip(long pos) {
+            // Flip buffers once the read position has advanced into the second buffer.
+            return second != null && second.contains(pos);
+        }
+
+        /**
+         * Determines if all cached bytes should be discarded and new region should be
+         * prefetched.
+         *
+         * @param curPos Current stream position.
+         * @return {@code True} if need to refresh both blocks.
+         */
+        private boolean fullPrefetch(long curPos) {
+            // If no data was prefetched yet, return true.
+            return first == null || curPos < first.pos || (second != null && curPos >= second.pos + second.len);
+        }
+
+        /**
+         * Starts asynchronous fetch for given region.
+         *
+         * @param pos Position to read from.
+         * @param size Number of bytes to read.
+         * @return Fetch buffer part.
+         */
+        private FetchBufferPart fetch(long pos, int size) {
+            long remaining = limit - pos;
+
+            size = (int)Math.min(size, remaining);
+
+            return size <= 0 ? null :
+                new FetchBufferPart(delegate.hadoop().readData(delegate, pos, size, null, 0, 0), pos, size);
+        }
+    }
+}
\ No newline at end of file
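
To see the double buffering in action, consider a sequential scan. With buffer half size B, the
first read prefetches [pos, pos + B) and [pos + B, pos + 2B); as soon as the position crosses into
the second chunk, needFlip() promotes it to first and a fetch for the following B bytes starts, so
data is usually in flight before the reader needs it. A hypothetical read loop (the construction
arguments are assumed to come from the caller):

    /** Hypothetical sequential scan over an IGFS file. */
    long scan(HadoopIgfsStreamDelegate delegate, long fileLen, int bufSize, Log log,
        IgfsLogger clientLog, long streamId) throws IOException {
        byte[] chunk = new byte[8192];

        long total = 0;

        try (HadoopIgfsInputStream in =
            new HadoopIgfsInputStream(delegate, fileLen, bufSize, log, clientLog, streamId)) {
            int n;

            while ((n = in.read(chunk, 0, chunk.length)) != -1)
                total += n; // Reads are served from the prefetch buffers whenever possible.
        }

        return total;
    }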

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIo.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIo.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIo.java
new file mode 100644
index 0000000..70f645f
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIo.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.igfs;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.igfs.common.IgfsMessage;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * IO abstraction layer for the IGFS client. Two kinds of messages are expected to be sent: requests with a
+ * response and requests without a response.
+ */
+public interface HadoopIgfsIo {
+    /**
+     * Sends the given IGFS client message and asynchronously awaits the response.
+     *
+     * @param msg Message to send.
+     * @return Future that will be completed with the response.
+     * @throws IgniteCheckedException If a message cannot be sent (connection is broken or client was closed).
+     */
+    public IgniteInternalFuture<IgfsMessage> send(IgfsMessage msg) throws IgniteCheckedException;
+
+    /**
+     * Sends the given IGFS client message and asynchronously awaits the response. When the IO layer detects
+     * the beginning of the response for the given message, it stops reading data and passes the input stream
+     * to a closure which can read the response in a specific way.
+     *
+     * @param msg Message to send.
+     * @param outBuf Output buffer. If {@code null}, the output buffer is not used.
+     * @param outOff Output buffer offset.
+     * @param outLen Output buffer length.
+     * @return Future that will be completed when response is returned from closure.
+     * @throws IgniteCheckedException If a message cannot be sent (connection is broken or client was closed).
+     */
+    public <T> IgniteInternalFuture<T> send(IgfsMessage msg, @Nullable byte[] outBuf, int outOff, int outLen)
+        throws IgniteCheckedException;
+
+    /**
+     * Sends given message and does not wait for response.
+     *
+     * @param msg Message to send.
+     * @throws IgniteCheckedException If send failed.
+     */
+    public void sendPlain(IgfsMessage msg) throws IgniteCheckedException;
+
+    /**
+     * Adds an event listener that will be invoked when the connection with the server is lost or a remote
+     * error occurs. If the connection is already closed, the callback is invoked synchronously inside this method.
+     *
+     * @param lsnr Event listener.
+     */
+    public void addEventListener(HadoopIgfsIpcIoListener lsnr);
+
+    /**
+     * Removes an event listener previously added via {@link #addEventListener(HadoopIgfsIpcIoListener)}.
+     *
+     * @param lsnr Event listener.
+     */
+    public void removeEventListener(HadoopIgfsIpcIoListener lsnr);
+}
\ No newline at end of file
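
A caller-side sketch of the message flavors this interface distinguishes (the method and all of
its parameters are hypothetical; only the interface methods declared above are used):

    /** Hypothetical caller illustrating the three message flavors. */
    void examples(HadoopIgfsIo io, IgfsMessage req, IgfsMessage readReq, IgfsMessage notifyMsg,
        byte[] buf, int off, int len) throws IgniteCheckedException {
        // Request/response: block until the server replies.
        IgfsMessage res = io.send(req).get();

        // Streaming read: response payload is written directly into 'buf' starting at 'off'.
        byte[] data = io.<byte[]>send(readReq, buf, off, len).get();

        // One-way message: no response is expected.
        io.sendPlain(notifyMsg);
    }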


[10/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopTcpNioCommunicationClient.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopTcpNioCommunicationClient.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopTcpNioCommunicationClient.java
deleted file mode 100644
index 17c2ff5..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopTcpNioCommunicationClient.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication;
-
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;
-import org.apache.ignite.internal.util.nio.GridNioFuture;
-import org.apache.ignite.internal.util.nio.GridNioSession;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-/**
- * Grid client for NIO server.
- */
-public class HadoopTcpNioCommunicationClient extends HadoopAbstractCommunicationClient {
-    /** Socket. */
-    private final GridNioSession ses;
-
-    /**
-     * Constructor for test purposes only.
-     */
-    public HadoopTcpNioCommunicationClient() {
-        ses = null;
-    }
-
-    /**
-     * @param ses Session.
-     */
-    public HadoopTcpNioCommunicationClient(GridNioSession ses) {
-        assert ses != null;
-
-        this.ses = ses;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean close() {
-        boolean res = super.close();
-
-        if (res)
-            ses.close();
-
-        return res;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void forceClose() {
-        super.forceClose();
-
-        ses.close();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void sendMessage(HadoopProcessDescriptor desc, HadoopMessage msg)
-        throws IgniteCheckedException {
-        if (closed())
-            throw new IgniteCheckedException("Client was closed: " + this);
-
-        GridNioFuture<?> fut = ses.send(msg);
-
-        if (fut.isDone())
-            fut.get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public long getIdleTime() {
-        long now = U.currentTimeMillis();
-
-        // Session can be used for receiving and sending.
-        return Math.min(Math.min(now - ses.lastReceiveTime(), now - ses.lastSendScheduleTime()),
-            now - ses.lastSendTime());
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopTcpNioCommunicationClient.class, this, super.toString());
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1CleanupTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1CleanupTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1CleanupTask.java
deleted file mode 100644
index 750b314..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1CleanupTask.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v1;
-
-import java.io.IOException;
-import org.apache.hadoop.mapred.JobContext;
-import org.apache.hadoop.mapred.JobStatus;
-import org.apache.hadoop.mapred.OutputCommitter;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2TaskContext;
-
-/**
- * Hadoop cleanup task implementation for v1 API.
- */
-public class HadoopV1CleanupTask extends HadoopV1Task {
-    /** Abort flag. */
-    private final boolean abort;
-
-    /**
-     * @param taskInfo Task info.
-     * @param abort Abort flag.
-     */
-    public HadoopV1CleanupTask(HadoopTaskInfo taskInfo, boolean abort) {
-        super(taskInfo);
-
-        this.abort = abort;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
-        HadoopV2TaskContext ctx = (HadoopV2TaskContext)taskCtx;
-
-        JobContext jobCtx = ctx.jobContext();
-
-        try {
-            OutputCommitter committer = jobCtx.getJobConf().getOutputCommitter();
-
-            if (abort)
-                committer.abortJob(jobCtx, JobStatus.State.FAILED);
-            else
-                committer.commitJob(jobCtx);
-        }
-        catch (IOException e) {
-            throw new IgniteCheckedException(e);
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Counter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Counter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Counter.java
deleted file mode 100644
index c623eab..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Counter.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v1;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import org.apache.hadoop.mapred.Counters;
-import org.apache.hadoop.mapreduce.Counter;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopLongCounter;
-import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2Counter;
-
-import static org.apache.hadoop.mapreduce.util.CountersStrings.toEscapedCompactString;
-
-/**
- * Hadoop counter implementation for v1 API.
- */
-public class HadoopV1Counter extends Counters.Counter {
-    /** Delegate. */
-    private final HadoopLongCounter cntr;
-
-    /**
-     * Creates new instance.
-     *
-     * @param cntr Delegate counter.
-     */
-    public HadoopV1Counter(HadoopLongCounter cntr) {
-        this.cntr = cntr;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void setDisplayName(String displayName) {
-        // No-op.
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getName() {
-        return cntr.name();
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getDisplayName() {
-        return getName();
-    }
-
-    /** {@inheritDoc} */
-    @Override public long getValue() {
-        return cntr.value();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void setValue(long val) {
-        cntr.value(val);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void increment(long incr) {
-        cntr.increment(incr);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void write(DataOutput out) throws IOException {
-        throw new UnsupportedOperationException("not implemented");
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readFields(DataInput in) throws IOException {
-        throw new UnsupportedOperationException("not implemented");
-    }
-
-    /** {@inheritDoc} */
-    @Override public String makeEscapedCompactString() {
-        return toEscapedCompactString(new HadoopV2Counter(cntr));
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("deprecation")
-    @Override public boolean contentEquals(Counters.Counter cntr) {
-        return getUnderlyingCounter().equals(cntr.getUnderlyingCounter());
-    }
-
-    /** {@inheritDoc} */
-    @Override public long getCounter() {
-        return cntr.value();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Counter getUnderlyingCounter() {
-        return this;
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1MapTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1MapTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1MapTask.java
deleted file mode 100644
index fb2266a..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1MapTask.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v1;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapred.FileSplit;
-import org.apache.hadoop.mapred.InputFormat;
-import org.apache.hadoop.mapred.InputSplit;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.Mapper;
-import org.apache.hadoop.mapred.RecordReader;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopFileBlock;
-import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
-import org.apache.ignite.internal.processors.hadoop.HadoopJob;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2TaskContext;
-
-/**
- * Hadoop map task implementation for v1 API.
- */
-public class HadoopV1MapTask extends HadoopV1Task {
-    /** */
-    private static final String[] EMPTY_HOSTS = new String[0];
-
-    /**
-     * Constructor.
-     *
-     * @param taskInfo 
-     */
-    public HadoopV1MapTask(HadoopTaskInfo taskInfo) {
-        super(taskInfo);
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
-        HadoopJob job = taskCtx.job();
-
-        HadoopV2TaskContext ctx = (HadoopV2TaskContext)taskCtx;
-
-        JobConf jobConf = ctx.jobConf();
-
-        InputFormat inFormat = jobConf.getInputFormat();
-
-        HadoopInputSplit split = info().inputSplit();
-
-        InputSplit nativeSplit;
-
-        if (split instanceof HadoopFileBlock) {
-            HadoopFileBlock block = (HadoopFileBlock)split;
-
-            nativeSplit = new FileSplit(new Path(block.file().toString()), block.start(), block.length(), EMPTY_HOSTS);
-        }
-        else
-            nativeSplit = (InputSplit)ctx.getNativeSplit(split);
-
-        assert nativeSplit != null;
-
-        Reporter reporter = new HadoopV1Reporter(taskCtx);
-
-        HadoopV1OutputCollector collector = null;
-
-        try {
-            collector = collector(jobConf, ctx, !job.info().hasCombiner() && !job.info().hasReducer(),
-                fileName(), ctx.attemptId());
-
-            RecordReader reader = inFormat.getRecordReader(nativeSplit, jobConf, reporter);
-
-            Mapper mapper = ReflectionUtils.newInstance(jobConf.getMapperClass(), jobConf);
-
-            Object key = reader.createKey();
-            Object val = reader.createValue();
-
-            assert mapper != null;
-
-            try {
-                try {
-                    while (reader.next(key, val)) {
-                        if (isCancelled())
-                            throw new HadoopTaskCancelledException("Map task cancelled.");
-
-                        mapper.map(key, val, collector, reporter);
-                    }
-                }
-                finally {
-                    mapper.close();
-                }
-            }
-            finally {
-                collector.closeWriter();
-            }
-
-            collector.commit();
-        }
-        catch (Exception e) {
-            if (collector != null)
-                collector.abort();
-
-            throw new IgniteCheckedException(e);
-        }
-    }
-}
\ No newline at end of file
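
For reference, the run() method above is the classic v1 mapred read loop: obtain a RecordReader from the InputFormat, reuse one key/value pair, and feed the Mapper until the reader is exhausted. A minimal sketch of that loop against the public org.apache.hadoop.mapred API (the class name V1MapLoopSketch, the Reporter.NULL stand-in and the standalone method shape are illustrative assumptions, not part of this module):

import java.io.IOException;

import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.ReflectionUtils;

public class V1MapLoopSketch {
    @SuppressWarnings("unchecked")
    public static void runMapLoop(JobConf jobConf, InputSplit split,
        OutputCollector collector) throws IOException {
        InputFormat inFormat = jobConf.getInputFormat();

        // The reader is bound to one split; key/value objects are reused across iterations.
        RecordReader reader = inFormat.getRecordReader(split, jobConf, Reporter.NULL);

        Mapper mapper = ReflectionUtils.newInstance(jobConf.getMapperClass(), jobConf);

        Object key = reader.createKey();
        Object val = reader.createValue();

        try {
            while (reader.next(key, val))
                mapper.map(key, val, collector, Reporter.NULL);
        }
        finally {
            mapper.close();
            reader.close();
        }
    }
}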

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1OutputCollector.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1OutputCollector.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1OutputCollector.java
deleted file mode 100644
index 37f81a6..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1OutputCollector.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v1;
-
-import java.io.IOException;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.OutputCollector;
-import org.apache.hadoop.mapred.OutputCommitter;
-import org.apache.hadoop.mapred.OutputFormat;
-import org.apache.hadoop.mapred.RecordWriter;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.TaskAttemptContext;
-import org.apache.hadoop.mapred.TaskAttemptContextImpl;
-import org.apache.hadoop.mapred.TaskAttemptID;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Hadoop output collector.
- */
-public class HadoopV1OutputCollector implements OutputCollector {
-    /** Job configuration. */
-    private final JobConf jobConf;
-
-    /** Task context. */
-    private final HadoopTaskContext taskCtx;
-
-    /** Optional direct writer. */
-    private final RecordWriter writer;
-
-    /** Task attempt. */
-    private final TaskAttemptID attempt;
-
-    /**
-     * @param jobConf Job configuration.
-     * @param taskCtx Task context.
-     * @param directWrite Direct write flag.
-     * @param fileName File name.
-     * @throws IOException In case of IO exception.
-     */
-    HadoopV1OutputCollector(JobConf jobConf, HadoopTaskContext taskCtx, boolean directWrite,
-        @Nullable String fileName, TaskAttemptID attempt) throws IOException {
-        this.jobConf = jobConf;
-        this.taskCtx = taskCtx;
-        this.attempt = attempt;
-
-        if (directWrite) {
-            jobConf.set("mapreduce.task.attempt.id", attempt.toString());
-
-            OutputFormat outFormat = jobConf.getOutputFormat();
-
-            writer = outFormat.getRecordWriter(null, jobConf, fileName, Reporter.NULL);
-        }
-        else
-            writer = null;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override public void collect(Object key, Object val) throws IOException {
-        if (writer != null)
-            writer.write(key, val);
-        else {
-            try {
-                taskCtx.output().write(key, val);
-            }
-            catch (IgniteCheckedException e) {
-                throw new IOException(e);
-            }
-        }
-    }
-
-    /**
-     * Close writer.
-     *
-     * @throws IOException In case of IO exception.
-     */
-    public void closeWriter() throws IOException {
-        if (writer != null)
-            writer.close(Reporter.NULL);
-    }
-
-    /**
-     * Setup task.
-     *
-     * @throws IOException If failed.
-     */
-    public void setup() throws IOException {
-        if (writer != null)
-            jobConf.getOutputCommitter().setupTask(new TaskAttemptContextImpl(jobConf, attempt));
-    }
-
-    /**
-     * Commit task.
-     *
-     * @throws IOException If failed.
-     */
-    public void commit() throws IOException {
-        if (writer != null) {
-            OutputCommitter outputCommitter = jobConf.getOutputCommitter();
-
-            TaskAttemptContext taskCtx = new TaskAttemptContextImpl(jobConf, attempt);
-
-            if (outputCommitter.needsTaskCommit(taskCtx))
-                outputCommitter.commitTask(taskCtx);
-        }
-    }
-
-    /**
-     * Abort task.
-     */
-    public void abort() {
-        try {
-            if (writer != null)
-                jobConf.getOutputCommitter().abortTask(new TaskAttemptContextImpl(jobConf, attempt));
-        }
-        catch (IOException ignore) {
-            // No-op.
-        }
-    }
-}
\ No newline at end of file
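
The collector above leans on the v1 OutputCommitter protocol: set up the task attempt, do the work, commit only when the committer asks for it, and abort on failure. A condensed sketch of that lifecycle (the class name CommitterLifecycleSketch and the Runnable-based work placeholder are illustrative assumptions):

import java.io.IOException;

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCommitter;
import org.apache.hadoop.mapred.TaskAttemptContext;
import org.apache.hadoop.mapred.TaskAttemptContextImpl;
import org.apache.hadoop.mapred.TaskAttemptID;

public class CommitterLifecycleSketch {
    public static void runAttempt(JobConf jobConf, TaskAttemptID attempt, Runnable work)
        throws IOException {
        OutputCommitter committer = jobConf.getOutputCommitter();

        TaskAttemptContext taskCtx = new TaskAttemptContextImpl(jobConf, attempt);

        committer.setupTask(taskCtx);

        try {
            work.run();

            // Commit is conditional, as in the commit() method above.
            if (committer.needsTaskCommit(taskCtx))
                committer.commitTask(taskCtx);
        }
        catch (IOException | RuntimeException e) {
            committer.abortTask(taskCtx);

            throw e;
        }
    }
}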

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Partitioner.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Partitioner.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Partitioner.java
deleted file mode 100644
index 0ab1bba..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Partitioner.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v1;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapred.Partitioner;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.ignite.internal.processors.hadoop.HadoopPartitioner;
-
-/**
- * Hadoop partitioner adapter for v1 API.
- */
-public class HadoopV1Partitioner implements HadoopPartitioner {
-    /** Partitioner instance. */
-    private Partitioner<Object, Object> part;
-
-    /**
-     * @param cls Hadoop partitioner class.
-     * @param conf Job configuration.
-     */
-    public HadoopV1Partitioner(Class<? extends Partitioner> cls, Configuration conf) {
-        part = (Partitioner<Object, Object>) ReflectionUtils.newInstance(cls, conf);
-    }
-
-    /** {@inheritDoc} */
-    @Override public int partition(Object key, Object val, int parts) {
-        return part.getPartition(key, val, parts);
-    }
-}
\ No newline at end of file
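
Usage is plain delegation: the adapter instantiates the user's v1 Partitioner reflectively and forwards partition() calls. A small sketch with Hadoop's stock HashPartitioner (assuming the adapter class above is on the classpath; the surrounding class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.lib.HashPartitioner;

public class V1PartitionerSketch {
    public static void main(String[] args) {
        HadoopV1Partitioner part = new HadoopV1Partitioner(HashPartitioner.class, new Configuration());

        // The key's hash determines a partition index in [0, 10).
        int idx = part.partition(new Text("some-key"), new IntWritable(1), 10);

        assert idx >= 0 && idx < 10;
    }
}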

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1ReduceTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1ReduceTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1ReduceTask.java
deleted file mode 100644
index e656695..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1ReduceTask.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v1;
-
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.Reducer;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopJob;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
-import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2TaskContext;
-
-/**
- * Hadoop reduce task implementation for v1 API.
- */
-public class HadoopV1ReduceTask extends HadoopV1Task {
-    /** {@code True} if reduce, {@code false} if combine. */
-    private final boolean reduce;
-
-    /**
-     * Constructor.
-     *
-     * @param taskInfo Task info.
-     * @param reduce {@code True} if reduce, {@code false} if combine.
-     */
-    public HadoopV1ReduceTask(HadoopTaskInfo taskInfo, boolean reduce) {
-        super(taskInfo);
-
-        this.reduce = reduce;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
-        HadoopJob job = taskCtx.job();
-
-        HadoopV2TaskContext ctx = (HadoopV2TaskContext)taskCtx;
-
-        JobConf jobConf = ctx.jobConf();
-
-        HadoopTaskInput input = taskCtx.input();
-
-        HadoopV1OutputCollector collector = null;
-
-        try {
-            collector = collector(jobConf, ctx, reduce || !job.info().hasReducer(), fileName(), ctx.attemptId());
-
-            Reducer reducer = reduce ?
-                ReflectionUtils.newInstance(jobConf.getReducerClass(), jobConf) :
-                ReflectionUtils.newInstance(jobConf.getCombinerClass(), jobConf);
-
-            assert reducer != null;
-
-            try {
-                try {
-                    while (input.next()) {
-                        if (isCancelled())
-                            throw new HadoopTaskCancelledException("Reduce task cancelled.");
-
-                        reducer.reduce(input.key(), input.values(), collector, Reporter.NULL);
-                    }
-                }
-                finally {
-                    reducer.close();
-                }
-            }
-            finally {
-                collector.closeWriter();
-            }
-
-            collector.commit();
-        }
-        catch (Exception e) {
-            if (collector != null)
-                collector.abort();
-
-            throw new IgniteCheckedException(e);
-        }
-    }
-}
\ No newline at end of file
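
The loop above honors the v1 reduce contract: one reduce() call per distinct key, with the values supplied as an Iterator. A minimal sketch of feeding a reducer directly (the class name, the Text/IntWritable types and the hard-coded values are illustrative assumptions):

import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

public class V1ReduceContractSketch {
    public static void feedReducer(Reducer<Text, IntWritable, Text, IntWritable> reducer,
        OutputCollector<Text, IntWritable> collector) throws IOException {
        Iterator<IntWritable> values = Arrays.asList(new IntWritable(1), new IntWritable(2)).iterator();

        // One call per key, mirroring the input.next() loop above.
        reducer.reduce(new Text("word"), values, collector, Reporter.NULL);

        reducer.close();
    }
}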

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Reporter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Reporter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Reporter.java
deleted file mode 100644
index 5a63aab..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Reporter.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v1;
-
-import org.apache.hadoop.mapred.Counters;
-import org.apache.hadoop.mapred.InputSplit;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopLongCounter;
-
-/**
- * Hadoop reporter implementation for v1 API.
- */
-public class HadoopV1Reporter implements Reporter {
-    /** Context. */
-    private final HadoopTaskContext ctx;
-
-    /**
-     * Creates new instance.
-     *
-     * @param ctx Context.
-     */
-    public HadoopV1Reporter(HadoopTaskContext ctx) {
-        this.ctx = ctx;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void setStatus(String status) {
-        // TODO
-    }
-
-    /** {@inheritDoc} */
-    @Override public Counters.Counter getCounter(Enum<?> name) {
-        return getCounter(name.getDeclaringClass().getName(), name.name());
-    }
-
-    /** {@inheritDoc} */
-    @Override public Counters.Counter getCounter(String grp, String name) {
-        return new HadoopV1Counter(ctx.counter(grp, name, HadoopLongCounter.class));
-    }
-
-    /** {@inheritDoc} */
-    @Override public void incrCounter(Enum<?> key, long amount) {
-        getCounter(key).increment(amount);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void incrCounter(String grp, String cntr, long amount) {
-        getCounter(grp, cntr).increment(amount);
-    }
-
-    /** {@inheritDoc} */
-    @Override public InputSplit getInputSplit() throws UnsupportedOperationException {
-        throw new UnsupportedOperationException("reporter has no input"); // TODO
-    }
-
-    /** {@inheritDoc} */
-    @Override public float getProgress() {
-        return 0.5f; // TODO
-    }
-
-    /** {@inheritDoc} */
-    @Override public void progress() {
-        // TODO
-    }
-}
\ No newline at end of file
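
Counter resolution in the reporter above derives the group from the enum's declaring class and the counter name from the constant. A tiny sketch of how user code hits that path (the enum and class names are illustrative assumptions):

import org.apache.hadoop.mapred.Reporter;

public class ReporterCounterSketch {
    /** Illustrative counter enum; any user-defined enum resolves the same way. */
    enum WordCountCounters {
        PARSED_RECORDS
    }

    public static void count(Reporter reporter) {
        // Resolves to group = enum class name, name = "PARSED_RECORDS",
        // exactly as getCounter(Enum) above derives them.
        reporter.incrCounter(WordCountCounters.PARSED_RECORDS, 1);
    }
}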

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1SetupTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1SetupTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1SetupTask.java
deleted file mode 100644
index d2f6823..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1SetupTask.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v1;
-
-import java.io.IOException;
-import org.apache.hadoop.mapred.OutputCommitter;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2TaskContext;
-
-/**
- * Hadoop setup task implementation for v1 API.
- */
-public class HadoopV1SetupTask extends HadoopV1Task {
-    /**
-     * Constructor.
-     *
-     * @param taskInfo Task info.
-     */
-    public HadoopV1SetupTask(HadoopTaskInfo taskInfo) {
-        super(taskInfo);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
-        HadoopV2TaskContext ctx = (HadoopV2TaskContext)taskCtx;
-
-        try {
-            ctx.jobConf().getOutputFormat().checkOutputSpecs(null, ctx.jobConf());
-
-            OutputCommitter committer = ctx.jobConf().getOutputCommitter();
-
-            if (committer != null)
-                committer.setupJob(ctx.jobContext());
-        }
-        catch (IOException e) {
-            throw new IgniteCheckedException(e);
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Splitter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Splitter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Splitter.java
deleted file mode 100644
index 203def4..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Splitter.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v1;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.mapred.FileSplit;
-import org.apache.hadoop.mapred.InputFormat;
-import org.apache.hadoop.mapred.InputSplit;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopFileBlock;
-import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
-import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Hadoop API v1 splitter.
- */
-public class HadoopV1Splitter {
-    /** */
-    private static final String[] EMPTY_HOSTS = {};
-
-    /**
-     * @param jobConf Job configuration.
-     * @return Collection of mapped splits.
-     * @throws IgniteCheckedException If mapping failed.
-     */
-    public static Collection<HadoopInputSplit> splitJob(JobConf jobConf) throws IgniteCheckedException {
-        try {
-            InputFormat<?, ?> format = jobConf.getInputFormat();
-
-            assert format != null;
-
-            InputSplit[] splits = format.getSplits(jobConf, 0);
-
-            Collection<HadoopInputSplit> res = new ArrayList<>(splits.length);
-
-            for (int i = 0; i < splits.length; i++) {
-                InputSplit nativeSplit = splits[i];
-
-                if (nativeSplit instanceof FileSplit) {
-                    FileSplit s = (FileSplit)nativeSplit;
-
-                    res.add(new HadoopFileBlock(s.getLocations(), s.getPath().toUri(), s.getStart(), s.getLength()));
-                }
-                else
-                    res.add(HadoopUtils.wrapSplit(i, nativeSplit, nativeSplit.getLocations()));
-            }
-
-            return res;
-        }
-        catch (IOException e) {
-            throw new IgniteCheckedException(e);
-        }
-    }
-
-    /**
-     * @param clsName Input split class name.
-     * @param in Input stream.
-     * @param hosts Optional hosts.
-     * @return File block or {@code null} if it is not a {@link FileSplit} instance.
-     * @throws IgniteCheckedException If failed.
-     */
-    @Nullable public static HadoopFileBlock readFileBlock(String clsName, FSDataInputStream in,
-        @Nullable String[] hosts) throws IgniteCheckedException {
-        if (!FileSplit.class.getName().equals(clsName))
-            return null;
-
-        FileSplit split = U.newInstance(FileSplit.class);
-
-        try {
-            split.readFields(in);
-        }
-        catch (IOException e) {
-            throw new IgniteCheckedException(e);
-        }
-
-        if (hosts == null)
-            hosts = EMPTY_HOSTS;
-
-        return new HadoopFileBlock(hosts, split.getPath().toUri(), split.getStart(), split.getLength());
-    }
-}
\ No newline at end of file
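
splitJob() above asks the v1 InputFormat for its splits and re-maps FileSplits to (file, start, length) triples. A short sketch of the same enumeration against the public API (the class name and println output are illustrative):

import java.io.IOException;

import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;

public class V1SplitEnumerationSketch {
    public static void printFileBlocks(JobConf jobConf) throws IOException {
        InputFormat<?, ?> format = jobConf.getInputFormat();

        // numSplits = 0 lets the format pick its natural split count, as in splitJob() above.
        for (InputSplit split : format.getSplits(jobConf, 0)) {
            if (split instanceof FileSplit) {
                FileSplit s = (FileSplit)split;

                System.out.println(s.getPath().toUri() + " [" + s.getStart() + ", +" + s.getLength() + ')');
            }
        }
    }
}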

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Task.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Task.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Task.java
deleted file mode 100644
index a89323c..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Task.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v1;
-
-import java.io.IOException;
-import java.text.NumberFormat;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.TaskAttemptID;
-import org.apache.ignite.internal.processors.hadoop.HadoopTask;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2TaskContext;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Extended Hadoop v1 task.
- */
-public abstract class HadoopV1Task extends HadoopTask {
-    /** Indicates that this task is to be cancelled. */
-    private volatile boolean cancelled;
-
-    /**
-     * Constructor.
-     *
-     * @param taskInfo Task info.
-     */
-    protected HadoopV1Task(HadoopTaskInfo taskInfo) {
-        super(taskInfo);
-    }
-
-    /**
-     * Gets the file name for this task's result.
-     *
-     * @return File name.
-     */
-    public String fileName() {
-        NumberFormat numFormat = NumberFormat.getInstance();
-
-        numFormat.setMinimumIntegerDigits(5);
-        numFormat.setGroupingUsed(false);
-
-        return "part-" + numFormat.format(info().taskNumber());
-    }
-
-    /**
-     *
-     * @param jobConf Job configuration.
-     * @param taskCtx Task context.
-     * @param directWrite Direct write flag.
-     * @param fileName File name.
-     * @param attempt Attempt of task.
-     * @return Collector.
-     * @throws IOException In case of IO exception.
-     */
-    protected HadoopV1OutputCollector collector(JobConf jobConf, HadoopV2TaskContext taskCtx,
-        boolean directWrite, @Nullable String fileName, TaskAttemptID attempt) throws IOException {
-        HadoopV1OutputCollector collector = new HadoopV1OutputCollector(jobConf, taskCtx, directWrite,
-            fileName, attempt) {
-            /** {@inheritDoc} */
-            @Override public void collect(Object key, Object val) throws IOException {
-                if (cancelled)
-                    throw new HadoopTaskCancelledException("Task cancelled.");
-
-                super.collect(key, val);
-            }
-        };
-
-        collector.setup();
-
-        return collector;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void cancel() {
-        cancelled = true;
-    }
-
-    /**
-     * @return {@code True} if task is cancelled.
-     */
-    public boolean isCancelled() {
-        return cancelled;
-    }
-}
\ No newline at end of file
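
fileName() above reproduces Hadoop's standard part-file naming. A standalone sketch of the same formatting (the class and method shape are illustrative):

import java.text.NumberFormat;

public class PartFileNameSketch {
    public static String fileName(int taskNumber) {
        NumberFormat numFormat = NumberFormat.getInstance();

        numFormat.setMinimumIntegerDigits(5);
        numFormat.setGroupingUsed(false);

        // Task 7 -> "part-00007".
        return "part-" + numFormat.format(taskNumber);
    }
}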

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopDaemon.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopDaemon.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopDaemon.java
deleted file mode 100644
index 9632525..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopDaemon.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v2;
-
-import java.util.Collection;
-import java.util.LinkedList;
-
-/**
- * Replacement for Hadoop {@code org.apache.hadoop.util.Daemon} class.
- */
-@SuppressWarnings("UnusedDeclaration")
-public class HadoopDaemon extends Thread {
-    /** Lock object used for synchronization. */
-    private static final Object lock = new Object();
-
-    /** Collection to hold the threads to be stopped. */
-    private static Collection<HadoopDaemon> daemons = new LinkedList<>();
-
-    {
-        setDaemon(true); // always a daemon
-    }
-
-    /** Runnable of this thread, may be this. */
-    final Runnable runnable;
-
-    /**
-     * Construct a daemon thread.
-     */
-    public HadoopDaemon() {
-        super();
-
-        runnable = this;
-
-        enqueueIfNeeded();
-    }
-
-    /**
-     * Construct a daemon thread.
-     */
-    public HadoopDaemon(Runnable runnable) {
-        super(runnable);
-
-        this.runnable = runnable;
-
-        this.setName(runnable.toString());
-
-        enqueueIfNeeded();
-    }
-
-    /**
-     * Construct a daemon thread to be part of a specified thread group.
-     */
-    public HadoopDaemon(ThreadGroup grp, Runnable runnable) {
-        super(grp, runnable);
-
-        this.runnable = runnable;
-
-        this.setName(runnable.toString());
-
-        enqueueIfNeeded();
-    }
-
-    /**
-     * Getter for the runnable. May return this.
-     *
-     * @return The runnable.
-     */
-    public Runnable getRunnable() {
-        return runnable;
-    }
-
-    /**
-     * Checks whether the runnable is a Hadoop {@code org.apache.hadoop.hdfs.PeerCache} runnable.
-     *
-     * @param r The runnable.
-     * @return {@code True} if it is.
-     */
-    private static boolean isPeerCacheRunnable(Runnable r) {
-        String name = r.getClass().getName();
-
-        return name.startsWith("org.apache.hadoop.hdfs.PeerCache");
-    }
-
-    /**
-     * Enqueue this thread if it should be stopped upon the task end.
-     */
-    private void enqueueIfNeeded() {
-        synchronized (lock) {
-            if (daemons == null)
-                throw new RuntimeException("Failed to create HadoopDaemon (its registry is already cleared): " +
-                    "[classLoader=" + getClass().getClassLoader() + ']');
-
-            if (runnable.getClass().getClassLoader() == getClass().getClassLoader() && isPeerCacheRunnable(runnable))
-                daemons.add(this);
-        }
-    }
-
-    /**
-     * Stops all the registered threads.
-     */
-    public static void dequeueAndStopAll() {
-        synchronized (lock) {
-            if (daemons != null) {
-                for (HadoopDaemon daemon : daemons)
-                    daemon.interrupt();
-
-                daemons = null;
-            }
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopExternalSplit.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopExternalSplit.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopExternalSplit.java
deleted file mode 100644
index c7e8a0a..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopExternalSplit.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v2;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
-
-/**
- * Split serialized in external file.
- */
-public class HadoopExternalSplit extends HadoopInputSplit {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** */
-    private long off;
-
-    /**
-     * For {@link Externalizable}.
-     */
-    public HadoopExternalSplit() {
-        // No-op.
-    }
-
-    /**
-     * @param hosts Hosts.
-     * @param off Offset of this split in external file.
-     */
-    public HadoopExternalSplit(String[] hosts, long off) {
-        assert off >= 0 : off;
-        assert hosts != null;
-
-        this.hosts = hosts;
-        this.off = off;
-    }
-
-    /**
-     * @return Offset of this input split in external file.
-     */
-    public long offset() {
-        return off;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        out.writeLong(off);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        off = in.readLong();
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean equals(Object o) {
-        if (this == o)
-            return true;
-
-        if (o == null || getClass() != o.getClass())
-            return false;
-
-        HadoopExternalSplit that = (HadoopExternalSplit) o;
-
-        return off == that.off;
-    }
-
-    /** {@inheritDoc} */
-    @Override public int hashCode() {
-        return (int)(off ^ (off >>> 32));
-    }
-}
\ No newline at end of file
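
The hashCode() above is the standard XOR-fold of a long into an int; a one-line check of that equivalence (the class name is illustrative):

public class OffsetHashSketch {
    public static void main(String[] args) {
        long off = 42L;

        // XOR-folding the two 32-bit halves matches java.lang.Long's own hash.
        assert (int)(off ^ (off >>> 32)) == Long.valueOf(off).hashCode();
    }
}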

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSerializationWrapper.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSerializationWrapper.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSerializationWrapper.java
deleted file mode 100644
index 844e7f8..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSerializationWrapper.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v2;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import org.apache.hadoop.io.serializer.Deserializer;
-import org.apache.hadoop.io.serializer.Serialization;
-import org.apache.hadoop.io.serializer.Serializer;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopSerialization;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * The wrapper around external serializer.
- */
-public class HadoopSerializationWrapper<T> implements HadoopSerialization {
-    /** External serializer - writer. */
-    private final Serializer<T> serializer;
-
-    /** External serializer - reader. */
-    private final Deserializer<T> deserializer;
-
-    /** Data output for current write operation. */
-    private OutputStream currOut;
-
-    /** Data input for current read operation. */
-    private InputStream currIn;
-
-    /** Wrapper around current output to provide OutputStream interface. */
-    private final OutputStream outStream = new OutputStream() {
-        /** {@inheritDoc} */
-        @Override public void write(int b) throws IOException {
-            currOut.write(b);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void write(byte[] b, int off, int len) throws IOException {
-            currOut.write(b, off, len);
-        }
-    };
-
-    /** Wrapper around current input to provide InputStream interface. */
-    private final InputStream inStream = new InputStream() {
-        /** {@inheritDoc} */
-        @Override public int read() throws IOException {
-            return currIn.read();
-        }
-
-        /** {@inheritDoc} */
-        @Override public int read(byte[] b, int off, int len) throws IOException {
-            return currIn.read(b, off, len);
-        }
-    };
-
-    /**
-     * @param serialization External serializer to wrap.
-     * @param cls The class to serialize.
-     */
-    public HadoopSerializationWrapper(Serialization<T> serialization, Class<T> cls) throws IgniteCheckedException {
-        assert cls != null;
-
-        serializer = serialization.getSerializer(cls);
-        deserializer = serialization.getDeserializer(cls);
-
-        try {
-            serializer.open(outStream);
-            deserializer.open(inStream);
-        }
-        catch (IOException e) {
-            throw new IgniteCheckedException(e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void write(DataOutput out, Object obj) throws IgniteCheckedException {
-        assert out != null;
-        assert obj != null;
-
-        try {
-            currOut = (OutputStream)out;
-
-            serializer.serialize((T)obj);
-
-            currOut = null;
-        }
-        catch (IOException e) {
-            throw new IgniteCheckedException(e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public Object read(DataInput in, @Nullable Object obj) throws IgniteCheckedException {
-        assert in != null;
-
-        try {
-            currIn = (InputStream)in;
-
-            T res = deserializer.deserialize((T) obj);
-
-            currIn = null;
-
-            return res;
-        }
-        catch (IOException e) {
-            throw new IgniteCheckedException(e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void close() throws IgniteCheckedException {
-        try {
-            serializer.close();
-            deserializer.close();
-        }
-        catch (IOException e) {
-            throw new IgniteCheckedException(e);
-        }
-    }
-}
\ No newline at end of file
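
The wrapper above follows the standard Serializer/Deserializer protocol: open once against a stream, then serialize or deserialize repeatedly; the twist is that its streams re-point at the current DataOutput/DataInput on each call. A plain round-trip through that protocol using SerializationFactory (the class name and IntWritable payload are illustrative assumptions):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;

public class SerializationRoundTripSketch {
    public static void main(String[] args) throws IOException {
        SerializationFactory factory = new SerializationFactory(new Configuration());

        Serializer<IntWritable> ser = factory.getSerializer(IntWritable.class);
        Deserializer<IntWritable> deser = factory.getDeserializer(IntWritable.class);

        ByteArrayOutputStream bout = new ByteArrayOutputStream();

        // open/serialize/close - the writer half of the protocol.
        ser.open(bout);
        ser.serialize(new IntWritable(42));
        ser.close();

        // open/deserialize/close - the reader half; null asks for a fresh instance.
        deser.open(new ByteArrayInputStream(bout.toByteArray()));

        IntWritable res = deser.deserialize(null);

        deser.close();

        assert res.get() == 42;
    }
}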

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopShutdownHookManager.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopShutdownHookManager.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopShutdownHookManager.java
deleted file mode 100644
index 8bd71e0..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopShutdownHookManager.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v2;
-
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-/**
- * Fake manager for shutdown hooks.
- */
-public class HadoopShutdownHookManager {
-    /** */
-    private static final HadoopShutdownHookManager MGR = new HadoopShutdownHookManager();
-
-    /**
-     * Returns <code>ShutdownHookManager</code> singleton.
-     *
-     * @return <code>ShutdownHookManager</code> singleton.
-     */
-    public static HadoopShutdownHookManager get() {
-        return MGR;
-    }
-
-    /** */
-    private Set<Runnable> hooks = Collections.synchronizedSet(new HashSet<Runnable>());
-
-    /** */
-    private AtomicBoolean shutdownInProgress = new AtomicBoolean(false);
-
-    /**
-     * Singleton.
-     */
-    private HadoopShutdownHookManager() {
-        // No-op.
-    }
-
-    /**
-     * Adds a shutdownHook with a priority: the higher the priority,
-     * the earlier the hook runs. ShutdownHooks with the same priority run
-     * in a non-deterministic order. Note that this fake implementation
-     * ignores the priority.
-     *
-     * @param shutdownHook The shutdownHook <code>Runnable</code>.
-     * @param priority Priority of the shutdownHook.
-     */
-    public void addShutdownHook(Runnable shutdownHook, int priority) {
-        if (shutdownHook == null)
-            throw new IllegalArgumentException("shutdownHook cannot be NULL");
-
-        hooks.add(shutdownHook);
-    }
-
-    /**
-     * Removes a shutdownHook.
-     *
-     * @param shutdownHook shutdownHook to remove.
-     * @return TRUE if the shutdownHook was registered and removed,
-     * FALSE otherwise.
-     */
-    public boolean removeShutdownHook(Runnable shutdownHook) {
-        return hooks.remove(shutdownHook);
-    }
-
-    /**
-     * Indicates if a shutdownHook is registered or not.
-     *
-     * @param shutdownHook The shutdownHook to check.
-     * @return TRUE if the shutdownHook is registered, FALSE otherwise.
-     */
-    public boolean hasShutdownHook(Runnable shutdownHook) {
-        return hooks.contains(shutdownHook);
-    }
-
-    /**
-     * Indicates if shutdown is in progress or not.
-     *
-     * @return TRUE if the shutdown is in progress, otherwise FALSE.
-     */
-    public boolean isShutdownInProgress() {
-        return shutdownInProgress.get();
-    }
-}
\ No newline at end of file
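
Client code interacts with the fake manager exactly as with Hadoop's real one; only the execution semantics differ. A short usage sketch (the hook body is illustrative):

public class ShutdownHookSketch {
    public static void main(String[] args) {
        Runnable hook = new Runnable() {
            @Override public void run() {
                System.out.println("cleanup");
            }
        };

        HadoopShutdownHookManager mgr = HadoopShutdownHookManager.get();

        // Priority is accepted for API compatibility but ignored by this fake manager.
        mgr.addShutdownHook(hook, 10);

        assert mgr.hasShutdownHook(hook);
        assert mgr.removeShutdownHook(hook);
    }
}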

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSplitWrapper.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSplitWrapper.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSplitWrapper.java
deleted file mode 100644
index df77adb..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSplitWrapper.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v2;
-
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-/**
- * The wrapper for native hadoop input splits.
- *
- * Warning!! This class must not depend on any Hadoop classes directly or indirectly.
- */
-public class HadoopSplitWrapper extends HadoopInputSplit {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** Native hadoop input split. */
-    private byte[] bytes;
-
-    /** */
-    private String clsName;
-
-    /** Internal ID. */
-    private int id;
-
-    /**
-     * Creates new split wrapper.
-     */
-    public HadoopSplitWrapper() {
-        // No-op.
-    }
-
-    /**
-     * Creates new split wrapper.
-     *
-     * @param id Split ID.
-     * @param clsName Class name.
-     * @param bytes Serialized split.
-     * @param hosts Hosts where split is located.
-     */
-    public HadoopSplitWrapper(int id, String clsName, byte[] bytes, String[] hosts) {
-        assert hosts != null;
-        assert clsName != null;
-        assert bytes != null;
-
-        this.hosts = hosts;
-        this.id = id;
-
-        this.clsName = clsName;
-        this.bytes = bytes;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        out.writeInt(id);
-
-        out.writeUTF(clsName);
-        U.writeByteArray(out, bytes);
-    }
-
-    /**
-     * @return Class name.
-     */
-    public String className() {
-        return clsName;
-    }
-
-    /**
-     * @return Serialized split bytes.
-     */
-    public byte[] bytes() {
-        return bytes;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        id = in.readInt();
-
-        clsName = in.readUTF();
-        bytes = U.readByteArray(in);
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean equals(Object o) {
-        if (this == o)
-            return true;
-
-        if (o == null || getClass() != o.getClass())
-            return false;
-
-        HadoopSplitWrapper that = (HadoopSplitWrapper)o;
-
-        return id == that.id;
-    }
-
-    /** {@inheritDoc} */
-    @Override public int hashCode() {
-        return id;
-    }
-}
\ No newline at end of file
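
A wrapper instance carries the split's class name plus its Writable-serialized bytes, so nothing Hadoop-specific is needed to ship it. A sketch of how a native v1 FileSplit could be packed into the wrapper (the helper shape is an illustrative assumption; the module's own wrapping lives in HadoopUtils.wrapSplit):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.mapred.FileSplit;

public class SplitWrapSketch {
    public static HadoopSplitWrapper wrap(int id, FileSplit split) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();

        // v1 splits are Writable, so the native split serializes itself.
        split.write(new DataOutputStream(buf));

        return new HadoopSplitWrapper(id, split.getClass().getName(), buf.toByteArray(), split.getLocations());
    }
}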

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2CleanupTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2CleanupTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2CleanupTask.java
deleted file mode 100644
index abb904c..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2CleanupTask.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v2;
-
-import java.io.IOException;
-import org.apache.hadoop.mapred.JobContextImpl;
-import org.apache.hadoop.mapreduce.JobStatus;
-import org.apache.hadoop.mapreduce.OutputCommitter;
-import org.apache.hadoop.mapreduce.OutputFormat;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.IgniteInterruptedCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-
-/**
- * Hadoop cleanup task (commits or aborts job).
- */
-public class HadoopV2CleanupTask extends HadoopV2Task {
-    /** Abort flag. */
-    private final boolean abort;
-
-    /**
-     * @param taskInfo Task info.
-     * @param abort Abort flag.
-     */
-    public HadoopV2CleanupTask(HadoopTaskInfo taskInfo, boolean abort) {
-        super(taskInfo);
-
-        this.abort = abort;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("ConstantConditions")
-    @Override public void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException {
-        JobContextImpl jobCtx = taskCtx.jobContext();
-
-        try {
-            OutputFormat outputFormat = getOutputFormat(jobCtx);
-
-            OutputCommitter committer = outputFormat.getOutputCommitter(hadoopContext());
-
-            if (committer != null) {
-                if (abort)
-                    committer.abortJob(jobCtx, JobStatus.State.FAILED);
-                else
-                    committer.commitJob(jobCtx);
-            }
-        }
-        catch (ClassNotFoundException | IOException e) {
-            throw new IgniteCheckedException(e);
-        }
-        catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-
-            throw new IgniteInterruptedCheckedException(e);
-        }
-    }
-}
\ No newline at end of file
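
The cleanup task reduces to a single committer decision. A sketch of that branch against the public mapreduce API (the class and method shape are illustrative):

import java.io.IOException;

import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.OutputCommitter;

public class JobCleanupSketch {
    public static void finishJob(OutputCommitter committer, JobContext jobCtx, boolean abort)
        throws IOException {
        // Mirrors run0() above: cleanup either aborts or commits the whole job.
        if (abort)
            committer.abortJob(jobCtx, JobStatus.State.FAILED);
        else
            committer.commitJob(jobCtx);
    }
}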

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Context.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Context.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Context.java
deleted file mode 100644
index 2ff2945..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Context.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v2;
-
-import java.io.IOException;
-import java.util.Iterator;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.Counter;
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.MapContext;
-import org.apache.hadoop.mapreduce.OutputCommitter;
-import org.apache.hadoop.mapreduce.RecordReader;
-import org.apache.hadoop.mapreduce.RecordWriter;
-import org.apache.hadoop.mapreduce.ReduceContext;
-import org.apache.hadoop.mapreduce.TaskAttemptID;
-import org.apache.hadoop.mapreduce.lib.input.FileSplit;
-import org.apache.hadoop.mapreduce.task.JobContextImpl;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopFileBlock;
-import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskOutput;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopLongCounter;
-
-/**
- * Hadoop context implementation for v2 API. It provides IO operations for hadoop tasks.
- */
-public class HadoopV2Context extends JobContextImpl implements MapContext, ReduceContext {
-    /** Input reader overriding the HadoopTaskContext input. */
-    private RecordReader reader;
-
-    /** Output writer overriding the HadoopTaskContext output. */
-    private RecordWriter writer;
-
-    /** Output is provided by executor environment. */
-    private final HadoopTaskOutput output;
-
-    /** Input is provided by executor environment. */
-    private final HadoopTaskInput input;
-
-    /** Unique identifier for a task attempt. */
-    private final TaskAttemptID taskAttemptID;
-
-    /** Indicates that this task is to be cancelled. */
-    private volatile boolean cancelled;
-
-    /** Input split. */
-    private InputSplit inputSplit;
-
-    /** */
-    private final HadoopTaskContext ctx;
-
-    /** */
-    private String status;
-
-    /**
-     * @param ctx Context for IO operations.
-     */
-    public HadoopV2Context(HadoopV2TaskContext ctx) {
-        super(ctx.jobConf(), ctx.jobContext().getJobID());
-
-        taskAttemptID = ctx.attemptId();
-
-        conf.set("mapreduce.job.id", taskAttemptID.getJobID().toString());
-        conf.set("mapreduce.task.id", taskAttemptID.getTaskID().toString());
-
-        output = ctx.output();
-        input = ctx.input();
-
-        this.ctx = ctx;
-    }
-
-    /** {@inheritDoc} */
-    @Override public InputSplit getInputSplit() {
-        if (inputSplit == null) {
-            HadoopInputSplit split = ctx.taskInfo().inputSplit();
-
-            if (split == null)
-                return null;
-
-            if (split instanceof HadoopFileBlock) {
-                HadoopFileBlock fileBlock = (HadoopFileBlock)split;
-
-                inputSplit = new FileSplit(new Path(fileBlock.file()), fileBlock.start(), fileBlock.length(), null);
-            }
-            else {
-                try {
-                    inputSplit = (InputSplit)((HadoopV2TaskContext)ctx).getNativeSplit(split);
-                }
-                catch (IgniteCheckedException e) {
-                    throw new IllegalStateException(e);
-                }
-            }
-        }
-
-        return inputSplit;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean nextKeyValue() throws IOException, InterruptedException {
-        if (cancelled)
-            throw new HadoopTaskCancelledException("Task cancelled.");
-
-        return reader.nextKeyValue();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Object getCurrentKey() throws IOException, InterruptedException {
-        if (reader != null)
-            return reader.getCurrentKey();
-
-        return input.key();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Object getCurrentValue() throws IOException, InterruptedException {
-        return reader.getCurrentValue();
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override public void write(Object key, Object val) throws IOException, InterruptedException {
-        if (cancelled)
-            throw new HadoopTaskCancelledException("Task cancelled.");
-
-        if (writer != null)
-            writer.write(key, val);
-        else {
-            try {
-                output.write(key, val);
-            }
-            catch (IgniteCheckedException e) {
-                throw new IOException(e);
-            }
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public OutputCommitter getOutputCommitter() {
-        throw new UnsupportedOperationException();
-    }
-
-    /** {@inheritDoc} */
-    @Override public TaskAttemptID getTaskAttemptID() {
-        return taskAttemptID;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void setStatus(String msg) {
-        status = msg;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getStatus() {
-        return status;
-    }
-
-    /** {@inheritDoc} */
-    @Override public float getProgress() {
-        return 0.5f; // TODO: progress is not tracked yet, so a fixed value is returned.
-    }
-
-    /** {@inheritDoc} */
-    @Override public Counter getCounter(Enum<?> cntrName) {
-        return getCounter(cntrName.getDeclaringClass().getName(), cntrName.name());
-    }
-
-    /** {@inheritDoc} */
-    @Override public Counter getCounter(String grpName, String cntrName) {
-        return new HadoopV2Counter(ctx.counter(grpName, cntrName, HadoopLongCounter.class));
-    }
-
-    /** {@inheritDoc} */
-    @Override public void progress() {
-        // No-op.
-    }
-
-    /**
-     * Overrides default input data reader.
-     *
-     * @param reader New reader.
-     */
-    public void reader(RecordReader reader) {
-        this.reader = reader;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean nextKey() throws IOException, InterruptedException {
-        if (cancelled)
-            throw new HadoopTaskCancelledException("Task cancelled.");
-
-        return input.next();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Iterable getValues() throws IOException, InterruptedException {
-        return new Iterable() {
-            @Override public Iterator iterator() {
-                return input.values();
-            }
-        };
-    }
-
-    /**
-     * @return Overridden output data writer.
-     */
-    public RecordWriter writer() {
-        return writer;
-    }
-
-    /**
-     * Overrides default output data writer.
-     *
-     * @param writer New writer.
-     */
-    public void writer(RecordWriter writer) {
-        this.writer = writer;
-    }
-
-    /**
-     * Cancels the task by stopping the IO.
-     */
-    public void cancel() {
-        cancelled = true;
-    }
-}
\ No newline at end of file
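
The HadoopV2Context class in the diff above follows an override-or-fallback pattern for task IO: an
explicitly injected RecordReader or RecordWriter, when present, takes precedence over the input and
output supplied by the executor environment. A minimal, self-contained sketch of that pattern (a
hypothetical class, not part of this commit):

    import java.util.Iterator;

    /** Sketch: an injected source, when set, overrides the environment-provided one. */
    class OverridableSource<T> {
        /** Optional override, analogous to the RecordReader field above. */
        private Iterator<T> override;

        /** Default source, analogous to the executor-provided HadoopTaskInput. */
        private final Iterator<T> dflt;

        OverridableSource(Iterator<T> dflt) {
            this.dflt = dflt;
        }

        /** Installs the override, as HadoopV2Context.reader(RecordReader) does. */
        void override(Iterator<T> override) {
            this.override = override;
        }

        /** Reads from the override if present, otherwise falls back to the default. */
        T next() {
            return (override != null ? override : dflt).next();
        }
    }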

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Counter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Counter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Counter.java
deleted file mode 100644
index cad9e64..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Counter.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v2;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import org.apache.hadoop.mapreduce.Counter;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopLongCounter;
-
-/**
- * Adapter from Ignite's own counter implementation to the Hadoop API Counter of version 2.0.
- */
-public class HadoopV2Counter implements Counter {
-    /** Delegate. */
-    private final HadoopLongCounter cntr;
-
-    /**
-     * Creates new instance with given delegate.
-     *
-     * @param cntr Internal counter.
-     */
-    public HadoopV2Counter(HadoopLongCounter cntr) {
-        assert cntr != null : "counter must be non-null";
-
-        this.cntr = cntr;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void setDisplayName(String displayName) {
-        // No-op.
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getName() {
-        return cntr.name();
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getDisplayName() {
-        return getName();
-    }
-
-    /** {@inheritDoc} */
-    @Override public long getValue() {
-        return cntr.value();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void setValue(long val) {
-        cntr.value(val);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void increment(long incr) {
-        cntr.increment(incr);
-    }
-
-    /** {@inheritDoc} */
-    @Override public Counter getUnderlyingCounter() {
-        return this;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void write(DataOutput out) throws IOException {
-        throw new UnsupportedOperationException("not implemented");
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readFields(DataInput in) throws IOException {
-        throw new UnsupportedOperationException("not implemented");
-    }
-}
\ No newline at end of file
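
HadoopV2Counter in the diff above is a thin adapter: every Hadoop Counter call is delegated to the
internal HadoopLongCounter. The same delegation pattern, sketched over a plain AtomicLong so it compiles
without Ignite internals (a hypothetical class, not part of this commit; only Hadoop's
org.apache.hadoop.mapreduce.Counter interface is assumed):

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicLong;
    import org.apache.hadoop.mapreduce.Counter;

    /** Sketch: adapts a plain AtomicLong to the Hadoop v2 Counter interface. */
    class AtomicLongCounterAdapter implements Counter {
        /** Counter name. */
        private final String name;

        /** Delegate holding the value. */
        private final AtomicLong val = new AtomicLong();

        AtomicLongCounterAdapter(String name) {
            this.name = name;
        }

        @Override public void setDisplayName(String displayName) {
            // No-op, as in the adapter above.
        }

        @Override public String getName() {
            return name;
        }

        @Override public String getDisplayName() {
            return getName();
        }

        @Override public long getValue() {
            return val.get();
        }

        @Override public void setValue(long v) {
            val.set(v);
        }

        @Override public void increment(long incr) {
            val.addAndGet(incr);
        }

        @Override public Counter getUnderlyingCounter() {
            return this;
        }

        @Override public void write(DataOutput out) throws IOException {
            throw new UnsupportedOperationException("not implemented");
        }

        @Override public void readFields(DataInput in) throws IOException {
            throw new UnsupportedOperationException("not implemented");
        }
    }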


http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopIgfsSecondaryFileSystem.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopIgfsSecondaryFileSystem.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopIgfsSecondaryFileSystem.java
new file mode 100644
index 0000000..6b5c776
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopIgfsSecondaryFileSystem.java
@@ -0,0 +1,580 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.fs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.ParentNotDirectoryException;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathExistsException;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.IgniteFileSystem;
+import org.apache.ignite.igfs.IgfsDirectoryNotEmptyException;
+import org.apache.ignite.igfs.IgfsException;
+import org.apache.ignite.igfs.IgfsFile;
+import org.apache.ignite.igfs.IgfsParentNotDirectoryException;
+import org.apache.ignite.igfs.IgfsPath;
+import org.apache.ignite.igfs.IgfsPathAlreadyExistsException;
+import org.apache.ignite.igfs.IgfsPathNotFoundException;
+import org.apache.ignite.igfs.IgfsUserContext;
+import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystemPositionedReadable;
+import org.apache.ignite.internal.processors.hadoop.HadoopPayloadAware;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsProperties;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsSecondaryFileSystemPositionedReadable;
+import org.apache.ignite.internal.processors.igfs.IgfsEntryInfo;
+import org.apache.ignite.internal.processors.igfs.IgfsFileImpl;
+import org.apache.ignite.internal.processors.igfs.IgfsSecondaryFileSystemV2;
+import org.apache.ignite.internal.processors.igfs.IgfsUtils;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.lang.IgniteOutClosure;
+import org.apache.ignite.lang.IgniteUuid;
+import org.apache.ignite.lifecycle.LifecycleAware;
+import org.jetbrains.annotations.Nullable;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.Callable;
+
+/**
+ * Secondary file system which delegates calls to an instance of Hadoop {@link FileSystem}.
+ * <p>
+ * Target {@code FileSystem}s are created on a per-user basis using the passed {@link HadoopFileSystemFactory}.
+ */
+public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSystemV2, LifecycleAware,
+    HadoopPayloadAware {
+    /** The default user name. It is used if no user context is set. */
+    private String dfltUsrName;
+
+    /** Factory. */
+    private HadoopFileSystemFactory fsFactory;
+
+    /**
+     * Default constructor for Spring.
+     */
+    public IgniteHadoopIgfsSecondaryFileSystem() {
+        // No-op.
+    }
+
+    /**
+     * Simple constructor that is to be used by default.
+     *
+     * @param uri URI of file system.
+     * @throws IgniteCheckedException In case of error.
+     * @deprecated Use {@link #setFileSystemFactory(HadoopFileSystemFactory)} instead.
+     */
+    @Deprecated
+    public IgniteHadoopIgfsSecondaryFileSystem(String uri) throws IgniteCheckedException {
+        this(uri, null, null);
+    }
+
+    /**
+     * Constructor.
+     *
+     * @param uri URI of file system.
+     * @param cfgPath Additional path to Hadoop configuration.
+     * @throws IgniteCheckedException In case of error.
+     * @deprecated Use {@link #setFileSystemFactory(HadoopFileSystemFactory)} instead.
+     */
+    @Deprecated
+    public IgniteHadoopIgfsSecondaryFileSystem(@Nullable String uri, @Nullable String cfgPath)
+        throws IgniteCheckedException {
+        this(uri, cfgPath, null);
+    }
+
+    /**
+     * Constructor.
+     *
+     * @param uri URI of file system.
+     * @param cfgPath Additional path to Hadoop configuration.
+     * @param userName User name.
+     * @throws IgniteCheckedException In case of error.
+     * @deprecated Use {@link #setFileSystemFactory(HadoopFileSystemFactory)} instead.
+     */
+    @Deprecated
+    public IgniteHadoopIgfsSecondaryFileSystem(@Nullable String uri, @Nullable String cfgPath,
+        @Nullable String userName) throws IgniteCheckedException {
+        setDefaultUserName(userName);
+
+        CachingHadoopFileSystemFactory fac = new CachingHadoopFileSystemFactory();
+
+        fac.setUri(uri);
+
+        if (cfgPath != null)
+            fac.setConfigPaths(cfgPath);
+
+        setFileSystemFactory(fac);
+    }
+
+    /**
+     * Gets default user name.
+     * <p>
+     * Defines the user name which will be used during file system invocations in case no user name is defined
+     * explicitly through {@link FileSystem#get(URI, Configuration, String)}.
+     * <p>
+     * This name will also be used if you manipulate {@link IgniteFileSystem} directly and do not set the user name
+     * explicitly using {@link IgfsUserContext#doAs(String, IgniteOutClosure)} or
+     * {@link IgfsUserContext#doAs(String, Callable)} methods.
+     * <p>
+     * If not set, the value of the {@code "user.name"} system property will be used. If this property is not set
+     * either, {@code "anonymous"} will be used.
+     *
+     * @return Default user name.
+     */
+    @Nullable public String getDefaultUserName() {
+        return dfltUsrName;
+    }
+
+    /**
+     * Sets default user name. See {@link #getDefaultUserName()} for details.
+     *
+     * @param dfltUsrName Default user name.
+     */
+    public void setDefaultUserName(@Nullable String dfltUsrName) {
+        this.dfltUsrName = dfltUsrName;
+    }
+
+    /**
+     * Gets secondary file system factory.
+     * <p>
+     * This factory will be used whenever a call to a target {@link FileSystem} is required.
+     * <p>
+     * If not set, {@link CachingHadoopFileSystemFactory} will be used.
+     *
+     * @return Secondary file system factory.
+     */
+    public HadoopFileSystemFactory getFileSystemFactory() {
+        return fsFactory;
+    }
+
+    /**
+     * Sets secondary file system factory. See {@link #getFileSystemFactory()} for details.
+     *
+     * @param factory Secondary file system factory.
+     */
+    public void setFileSystemFactory(HadoopFileSystemFactory factory) {
+        this.fsFactory = factory;
+    }
+
+    /**
+     * Convert IGFS path into Hadoop path.
+     *
+     * @param path IGFS path.
+     * @return Hadoop path.
+     */
+    private Path convert(IgfsPath path) {
+        URI uri = fileSystemForUser().getUri();
+
+        return new Path(uri.getScheme(), uri.getAuthority(), path.toString());
+    }
+
+    /**
+     * Converts an IO exception from the secondary file system into an appropriate IGFS exception.
+     *
+     * @param e Exception to check.
+     * @param detailMsg Detailed error message.
+     * @return Appropriate exception.
+     */
+    private IgfsException handleSecondaryFsError(IOException e, String detailMsg) {
+        return cast(detailMsg, e);
+    }
+
+    /**
+     * Cast IO exception to IGFS exception.
+     *
+     * @param msg Detail message.
+     * @param e IO exception.
+     * @return IGFS exception.
+     */
+    public static IgfsException cast(String msg, IOException e) {
+        if (e instanceof FileNotFoundException)
+            return new IgfsPathNotFoundException(e);
+        else if (e instanceof ParentNotDirectoryException)
+            return new IgfsParentNotDirectoryException(msg, e);
+        else if (e instanceof PathIsNotEmptyDirectoryException)
+            return new IgfsDirectoryNotEmptyException(e);
+        else if (e instanceof PathExistsException)
+            return new IgfsPathAlreadyExistsException(msg, e);
+        else
+            return new IgfsException(msg, e);
+    }
+
+    /**
+     * Convert Hadoop FileStatus properties to map.
+     *
+     * @param status File status.
+     * @return IGFS attributes.
+     */
+    private static Map<String, String> properties(FileStatus status) {
+        FsPermission perm = status.getPermission();
+
+        if (perm == null)
+            perm = FsPermission.getDefault();
+
+        HashMap<String, String> res = new HashMap<>(3);
+
+        res.put(IgfsUtils.PROP_PERMISSION, String.format("%04o", perm.toShort()));
+        res.put(IgfsUtils.PROP_USER_NAME, status.getOwner());
+        res.put(IgfsUtils.PROP_GROUP_NAME, status.getGroup());
+
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean exists(IgfsPath path) {
+        try {
+            return fileSystemForUser().exists(convert(path));
+        }
+        catch (IOException e) {
+            throw handleSecondaryFsError(e, "Failed to check file existence [path=" + path + "]");
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public IgfsFile update(IgfsPath path, Map<String, String> props) {
+        HadoopIgfsProperties props0 = new HadoopIgfsProperties(props);
+
+        final FileSystem fileSys = fileSystemForUser();
+
+        try {
+            if (props0.userName() != null || props0.groupName() != null)
+                fileSys.setOwner(convert(path), props0.userName(), props0.groupName());
+
+            if (props0.permission() != null)
+                fileSys.setPermission(convert(path), props0.permission());
+        }
+        catch (IOException e) {
+            throw handleSecondaryFsError(e, "Failed to update file properties [path=" + path + "]");
+        }
+
+        // Result is not used in case of secondary FS.
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void rename(IgfsPath src, IgfsPath dest) {
+        // Delegate to the secondary file system.
+        try {
+            if (!fileSystemForUser().rename(convert(src), convert(dest)))
+                throw new IgfsException("Failed to rename (secondary file system returned false) " +
+                    "[src=" + src + ", dest=" + dest + ']');
+        }
+        catch (IOException e) {
+            throw handleSecondaryFsError(e, "Failed to rename file [src=" + src + ", dest=" + dest + ']');
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean delete(IgfsPath path, boolean recursive) {
+        try {
+            return fileSystemForUser().delete(convert(path), recursive);
+        }
+        catch (IOException e) {
+            throw handleSecondaryFsError(e, "Failed to delete file [path=" + path + ", recursive=" + recursive + "]");
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void mkdirs(IgfsPath path) {
+        try {
+            if (!fileSystemForUser().mkdirs(convert(path)))
+                throw new IgniteException("Failed to make directories [path=" + path + "]");
+        }
+        catch (IOException e) {
+            throw handleSecondaryFsError(e, "Failed to make directories [path=" + path + "]");
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void mkdirs(IgfsPath path, @Nullable Map<String, String> props) {
+        try {
+            if (!fileSystemForUser().mkdirs(convert(path), new HadoopIgfsProperties(props).permission()))
+                throw new IgniteException("Failed to make directories [path=" + path + ", props=" + props + "]");
+        }
+        catch (IOException e) {
+            throw handleSecondaryFsError(e, "Failed to make directories [path=" + path + ", props=" + props + "]");
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public Collection<IgfsPath> listPaths(IgfsPath path) {
+        try {
+            FileStatus[] statuses = fileSystemForUser().listStatus(convert(path));
+
+            if (statuses == null)
+                throw new IgfsPathNotFoundException("Failed to list files (path not found): " + path);
+
+            Collection<IgfsPath> res = new ArrayList<>(statuses.length);
+
+            for (FileStatus status : statuses)
+                res.add(new IgfsPath(path, status.getPath().getName()));
+
+            return res;
+        }
+        catch (FileNotFoundException ignored) {
+            throw new IgfsPathNotFoundException("Failed to list files (path not found): " + path);
+        }
+        catch (IOException e) {
+            throw handleSecondaryFsError(e, "Failed to list statuses due to secondary file system exception: " + path);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public Collection<IgfsFile> listFiles(IgfsPath path) {
+        try {
+            FileStatus[] statuses = fileSystemForUser().listStatus(convert(path));
+
+            if (statuses == null)
+                throw new IgfsPathNotFoundException("Failed to list files (path not found): " + path);
+
+            Collection<IgfsFile> res = new ArrayList<>(statuses.length);
+
+            for (FileStatus s : statuses) {
+                IgfsEntryInfo fsInfo = s.isDirectory() ?
+                    IgfsUtils.createDirectory(
+                        IgniteUuid.randomUuid(),
+                        null,
+                        properties(s),
+                        s.getAccessTime(),
+                        s.getModificationTime()
+                    ) :
+                    IgfsUtils.createFile(
+                        IgniteUuid.randomUuid(),
+                        (int)s.getBlockSize(),
+                        s.getLen(),
+                        null,
+                        null,
+                        false,
+                        properties(s),
+                        s.getAccessTime(),
+                        s.getModificationTime()
+                    );
+
+                res.add(new IgfsFileImpl(new IgfsPath(path, s.getPath().getName()), fsInfo, 1));
+            }
+
+            return res;
+        }
+        catch (FileNotFoundException ignored) {
+            throw new IgfsPathNotFoundException("Failed to list files (path not found): " + path);
+        }
+        catch (IOException e) {
+            throw handleSecondaryFsError(e, "Failed to list statuses due to secondary file system exception: " + path);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgfsSecondaryFileSystemPositionedReadable open(IgfsPath path, int bufSize) {
+        return new HadoopIgfsSecondaryFileSystemPositionedReadable(fileSystemForUser(), convert(path), bufSize);
+    }
+
+    /** {@inheritDoc} */
+    @Override public OutputStream create(IgfsPath path, boolean overwrite) {
+        try {
+            return fileSystemForUser().create(convert(path), overwrite);
+        }
+        catch (IOException e) {
+            throw handleSecondaryFsError(e, "Failed to create file [path=" + path + ", overwrite=" + overwrite + "]");
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public OutputStream create(IgfsPath path, int bufSize, boolean overwrite, int replication,
+        long blockSize, @Nullable Map<String, String> props) {
+        HadoopIgfsProperties props0 =
+            new HadoopIgfsProperties(props != null ? props : Collections.<String, String>emptyMap());
+
+        try {
+            return fileSystemForUser().create(convert(path), props0.permission(), overwrite, bufSize,
+                (short) replication, blockSize, null);
+        }
+        catch (IOException e) {
+            throw handleSecondaryFsError(e, "Failed to create file [path=" + path + ", props=" + props +
+                ", overwrite=" + overwrite + ", bufSize=" + bufSize + ", replication=" + replication +
+                ", blockSize=" + blockSize + "]");
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public OutputStream append(IgfsPath path, int bufSize, boolean create,
+        @Nullable Map<String, String> props) {
+        try {
+            return fileSystemForUser().append(convert(path), bufSize);
+        }
+        catch (IOException e) {
+            throw handleSecondaryFsError(e, "Failed to append file [path=" + path + ", bufSize=" + bufSize + "]");
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgfsFile info(final IgfsPath path) {
+        try {
+            final FileStatus status = fileSystemForUser().getFileStatus(convert(path));
+
+            if (status == null)
+                return null;
+
+            final Map<String, String> props = properties(status);
+
+            return new IgfsFile() {
+                @Override public IgfsPath path() {
+                    return path;
+                }
+
+                @Override public boolean isFile() {
+                    return status.isFile();
+                }
+
+                @Override public boolean isDirectory() {
+                    return status.isDirectory();
+                }
+
+                @Override public int blockSize() {
+                    // By convention directory has blockSize == 0, while file has blockSize > 0:
+                    return isDirectory() ? 0 : (int)status.getBlockSize();
+                }
+
+                @Override public long groupBlockSize() {
+                    return status.getBlockSize();
+                }
+
+                @Override public long accessTime() {
+                    return status.getAccessTime();
+                }
+
+                @Override public long modificationTime() {
+                    return status.getModificationTime();
+                }
+
+                @Override public String property(String name) throws IllegalArgumentException {
+                    String val = props.get(name);
+
+                    if (val == null)
+                        throw new IllegalArgumentException("File property not found [path=" + path + ", name=" + name + ']');
+
+                    return val;
+                }
+
+                @Nullable @Override public String property(String name, @Nullable String dfltVal) {
+                    String val = props.get(name);
+
+                    return val == null ? dfltVal : val;
+                }
+
+                @Override public long length() {
+                    return status.getLen();
+                }
+
+                /** {@inheritDoc} */
+                @Override public Map<String, String> properties() {
+                    return props;
+                }
+            };
+        }
+        catch (FileNotFoundException ignore) {
+            return null;
+        }
+        catch (IOException e) {
+            throw handleSecondaryFsError(e, "Failed to get file status [path=" + path + "]");
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public long usedSpaceSize() {
+        try {
+            // We don't use FileSystem#getUsed() since it counts only the files
+            // in the filesystem root, not all the files recursively.
+            return fileSystemForUser().getContentSummary(new Path("/")).getSpaceConsumed();
+        }
+        catch (IOException e) {
+            throw handleSecondaryFsError(e, "Failed to get used space size of file system.");
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setTimes(IgfsPath path, long accessTime, long modificationTime) throws IgniteException {
+        try {
+            // Note: Hadoop's FileSystem#setTimes() expects modification time first, then access time.
+            fileSystemForUser().setTimes(convert(path), modificationTime, accessTime);
+        }
+        catch (IOException e) {
+            throw handleSecondaryFsError(e, "Failed to set times for path: " + path);
+        }
+    }
+
+    /**
+     * Gets the underlying {@link FileSystem}.
+     * This method is used solely for testing.
+     * @return the underlying Hadoop {@link FileSystem}.
+     */
+    public FileSystem fileSystem() {
+        return fileSystemForUser();
+    }
+
+    /**
+     * Gets the FileSystem for the current context user.
+     * @return the FileSystem instance, never null.
+     */
+    private FileSystem fileSystemForUser() {
+        String user = IgfsUserContext.currentUser();
+
+        if (F.isEmpty(user))
+            user = IgfsUtils.fixUserName(dfltUsrName);
+
+        assert !F.isEmpty(user);
+
+        try {
+            return fsFactory.get(user);
+        }
+        catch (IOException ioe) {
+            throw new IgniteException(ioe);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void start() throws IgniteException {
+        dfltUsrName = IgfsUtils.fixUserName(dfltUsrName);
+
+        if (fsFactory == null)
+            fsFactory = new CachingHadoopFileSystemFactory();
+
+        if (fsFactory instanceof LifecycleAware)
+            ((LifecycleAware) fsFactory).start();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void stop() throws IgniteException {
+        if (fsFactory instanceof LifecycleAware)
+            ((LifecycleAware)fsFactory).stop();
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopFileSystemFactory getPayload() {
+        return fsFactory;
+    }
+}
\ No newline at end of file
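
The default-user-name fallback documented in the class above resolves, in order: the current
IgfsUserContext user, the configured default user name, the "user.name" system property, and finally
"anonymous". A sketch of that resolution order (resolveUser is a hypothetical helper, not part of this
commit; the real code delegates to IgfsUtils.fixUserName()):

    /** Sketch of the documented user-name resolution order. */
    static String resolveUser(String ctxUser, String dfltUsrName) {
        if (ctxUser != null && !ctxUser.isEmpty())
            return ctxUser; // Explicit user from IgfsUserContext wins.

        if (dfltUsrName != null && !dfltUsrName.isEmpty())
            return dfltUsrName; // Configured default user name.

        String sysUser = System.getProperty("user.name");

        // Last-resort fallbacks described in getDefaultUserName().
        return sysUser == null || sysUser.isEmpty() ? "anonymous" : sysUser;
    }

And a sketch of wiring the secondary file system up programmatically. The URI and user name are
placeholders, the setters are those defined in the diff above, and
FileSystemConfiguration#setSecondaryFileSystem is assumed from the public Ignite API:

    import org.apache.ignite.configuration.FileSystemConfiguration;
    import org.apache.ignite.hadoop.fs.CachingHadoopFileSystemFactory;
    import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;

    public class SecondaryFsConfigSketch {
        /** Builds an IGFS configuration backed by a per-user HDFS delegate. */
        public static FileSystemConfiguration configure() {
            // Factory that creates a Hadoop FileSystem per user.
            CachingHadoopFileSystemFactory fac = new CachingHadoopFileSystemFactory();
            fac.setUri("hdfs://namenode.example.com:9000/"); // Placeholder HDFS URI.

            // Secondary file system delegating all calls to the factory's FileSystem.
            IgniteHadoopIgfsSecondaryFileSystem secFs = new IgniteHadoopIgfsSecondaryFileSystem();
            secFs.setFileSystemFactory(fac);
            secFs.setDefaultUserName("hdfs"); // Used when no IgfsUserContext user is set.

            FileSystemConfiguration fsCfg = new FileSystemConfiguration();
            fsCfg.setSecondaryFileSystem(secFs);

            return fsCfg;
        }
    }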

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/KerberosHadoopFileSystemFactory.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/KerberosHadoopFileSystemFactory.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/KerberosHadoopFileSystemFactory.java
new file mode 100644
index 0000000..bbfbc59
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/KerberosHadoopFileSystemFactory.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.fs;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.internal.A;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.jetbrains.annotations.Nullable;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.security.PrivilegedExceptionAction;
+
+/**
+ * Secure Hadoop file system factory that can work with an underlying file system protected with Kerberos.
+ * It uses the "impersonation" mechanism to be able to work on behalf of an arbitrary client user.
+ * Please see https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/Superusers.html for details.
+ * The principal and the key tab name to be used for Kerberos authentication are set explicitly
+ * in the factory configuration.
+ *
+ * <p>This factory does not cache any file system instances itself. Unless {@code "fs.[prefix].impl.disable.cache"}
+ * is set to {@code true}, file system instances will be cached by Hadoop.
+ */
+public class KerberosHadoopFileSystemFactory extends BasicHadoopFileSystemFactory {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** The default interval used to re-login from the key tab, in milliseconds. */
+    public static final long DFLT_RELOGIN_INTERVAL = 10 * 60 * 1000L;
+
+    /** Keytab full file name. */
+    private String keyTab;
+
+    /** Keytab principal. */
+    private String keyTabPrincipal;
+
+    /** The re-login interval. See {@link #getReloginInterval()} for more information. */
+    private long reloginInterval = DFLT_RELOGIN_INTERVAL;
+
+    /** Time of last re-login attempt, in system milliseconds. */
+    private transient volatile long lastReloginTime;
+
+    /**
+     * Constructor.
+     */
+    public KerberosHadoopFileSystemFactory() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public FileSystem getWithMappedName(String name) throws IOException {
+        reloginIfNeeded();
+
+        return super.getWithMappedName(name);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected FileSystem create(String usrName) throws IOException, InterruptedException {
+        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser(usrName,
+            UserGroupInformation.getLoginUser());
+
+        return proxyUgi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+            @Override public FileSystem run() throws Exception {
+                return FileSystem.get(fullUri, cfg);
+            }
+        });
+    }
+
+    /**
+     * Gets the key tab principal short name (e.g. "hdfs").
+     *
+     * @return The key tab principal.
+     */
+    @Nullable public String getKeyTabPrincipal() {
+        return keyTabPrincipal;
+    }
+
+    /**
+     * Sets the key tab principal name. See {@link #getKeyTabPrincipal()} for more information.
+     *
+     * @param keyTabPrincipal The key tab principal name.
+     */
+    public void setKeyTabPrincipal(@Nullable String keyTabPrincipal) {
+        this.keyTabPrincipal = keyTabPrincipal;
+    }
+
+    /**
+     * Gets the key tab full file name (e.g. "/etc/security/keytabs/hdfs.headless.keytab" or "/etc/krb5.keytab").
+     * <p>
+     * <b>NOTE!</b> Factory can be serialized and transferred to other machines where instance of
+     * {@link IgniteHadoopFileSystem} resides. Corresponding path must exist on these machines as well.
+     *
+     * @return The key tab file name.
+     */
+    @Nullable public String getKeyTab() {
+        return keyTab;
+    }
+
+    /**
+     * Sets the key tab file name. See {@link #getKeyTab()} for more information.
+     *
+     * @param keyTab The key tab file name.
+     */
+    public void setKeyTab(@Nullable String keyTab) {
+        this.keyTab = keyTab;
+    }
+
+    /**
+     * Gets the interval used to re-login from the key tab, in milliseconds.
+     * Note that the value should not be larger than the Kerberos ticket lifetime multiplied by 0.2, because
+     * the ticket renew window starts at {@code 0.8 * ticket lifetime}.
+     * The default ticket lifetime is 1 day (24 hours), so the default re-login interval (10 min)
+     * obeys this rule well.
+     *
+     * <p>Zero value means that re-login should be attempted on each file system operation.
+     * Negative values are not allowed.
+     *
+     * <p>Note, however, that it does not make sense to make this value small, because Hadoop does not allow a
+     * login if less than {@link org.apache.hadoop.security.UserGroupInformation#MIN_TIME_BEFORE_RELOGIN} milliseconds
+     * have passed since the time of the previous login.
+     * See {@link org.apache.hadoop.security.UserGroupInformation#hasSufficientTimeElapsed(long)} and its usages for
+     * more detail.
+     *
+     * @return The re-login interval, in milliseconds.
+     */
+    public long getReloginInterval() {
+        return reloginInterval;
+    }
+
+    /**
+     * Sets the relogin interval in milliseconds. See {@link #getReloginInterval()} for more information.
+     *
+     * @param reloginInterval The re-login interval, in milliseconds.
+     */
+    public void setReloginInterval(long reloginInterval) {
+        this.reloginInterval = reloginInterval;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void start() throws IgniteException {
+        A.ensure(!F.isEmpty(keyTab), "keyTab cannot be empty.");
+        A.ensure(!F.isEmpty(keyTabPrincipal), "keyTabPrincipal cannot be empty.");
+        A.ensure(reloginInterval >= 0, "reloginInterval cannot be negative.");
+
+        super.start();
+
+        try {
+            UserGroupInformation.setConfiguration(cfg);
+            UserGroupInformation.loginUserFromKeytab(keyTabPrincipal, keyTab);
+        }
+        catch (IOException ioe) {
+            throw new IgniteException("Failed login from keytab [keyTab=" + keyTab +
+                ", keyTabPrincipal=" + keyTabPrincipal + ']', ioe);
+        }
+    }
+
+    /**
+     * Re-logins the user if needed.
+     * First, the re-login interval defined in the factory is checked: re-login attempts will be no more
+     * frequent than one attempt per {@code reloginInterval}.
+     * Second, the {@link UserGroupInformation#checkTGTAndReloginFromKeytab()} method is invoked; it gets the
+     * existing TGT and checks its validity. If the TGT is expired or close to expiry, it performs a re-login.
+     *
+     * <p>This operation is expected to be called upon each operation with a file system created by this factory.
+     * As long as {@link #get(String)} is invoked upon each file system operation of {@link IgniteHadoopFileSystem},
+     * there is no need to invoke it separately.
+     *
+     * @throws IOException If login fails.
+     */
+    private void reloginIfNeeded() throws IOException {
+        long now = System.currentTimeMillis();
+
+        if (now >= lastReloginTime + reloginInterval) {
+            UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
+
+            lastReloginTime = now;
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeExternal(ObjectOutput out) throws IOException {
+        super.writeExternal(out);
+
+        U.writeString(out, keyTab);
+        U.writeString(out, keyTabPrincipal);
+        out.writeLong(reloginInterval);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        super.readExternal(in);
+
+        keyTab = U.readString(in);
+        keyTabPrincipal = U.readString(in);
+        reloginInterval = in.readLong();
+    }
+}
\ No newline at end of file
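
A sketch of configuring the Kerberos factory above. The URI, keytab path, and principal are placeholders;
setUri is assumed to be inherited from BasicHadoopFileSystemFactory, and the remaining setters are those
defined in the diff above:

    import org.apache.ignite.hadoop.fs.KerberosHadoopFileSystemFactory;

    public class KerberosFactorySketch {
        /** Builds a Kerberos-aware file system factory. */
        public static KerberosHadoopFileSystemFactory configure() {
            KerberosHadoopFileSystemFactory fac = new KerberosHadoopFileSystemFactory();

            fac.setUri("hdfs://namenode.example.com:9000/");             // Placeholder secured HDFS URI.
            fac.setKeyTab("/etc/security/keytabs/hdfs.headless.keytab"); // Keytab must exist on every node.
            fac.setKeyTabPrincipal("hdfs");                              // Principal short name.
            fac.setReloginInterval(10 * 60 * 1000L);                     // Keep well below 0.2 * ticket lifetime.

            return fac;
        }
    }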

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/package-info.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/package-info.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/package-info.java
new file mode 100644
index 0000000..164801f
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <!-- Package description. -->
+ * Ignite Hadoop Accelerator file system API.
+ */
+package org.apache.ignite.hadoop.fs;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/v1/IgniteHadoopFileSystem.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/v1/IgniteHadoopFileSystem.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/v1/IgniteHadoopFileSystem.java
new file mode 100644
index 0000000..a06129e
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/v1/IgniteHadoopFileSystem.java
@@ -0,0 +1,1364 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.fs.v1;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.InvalidPathException;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Progressable;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.configuration.FileSystemConfiguration;
+import org.apache.ignite.hadoop.fs.HadoopFileSystemFactory;
+import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
+import org.apache.ignite.igfs.IgfsBlockLocation;
+import org.apache.ignite.igfs.IgfsException;
+import org.apache.ignite.igfs.IgfsFile;
+import org.apache.ignite.igfs.IgfsMode;
+import org.apache.ignite.igfs.IgfsPath;
+import org.apache.ignite.igfs.IgfsPathSummary;
+import org.apache.ignite.internal.igfs.common.IgfsLogger;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsInputStream;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsOutputStream;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsProxyInputStream;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsProxyOutputStream;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsStreamDelegate;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsWrapper;
+import org.apache.ignite.internal.processors.igfs.IgfsHandshakeResponse;
+import org.apache.ignite.internal.processors.igfs.IgfsModeResolver;
+import org.apache.ignite.internal.processors.igfs.IgfsPaths;
+import org.apache.ignite.internal.processors.igfs.IgfsUtils;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.T2;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.A;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lifecycle.LifecycleAware;
+import org.jetbrains.annotations.Nullable;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.apache.ignite.configuration.FileSystemConfiguration.DFLT_IGFS_LOG_BATCH_SIZE;
+import static org.apache.ignite.configuration.FileSystemConfiguration.DFLT_IGFS_LOG_DIR;
+import static org.apache.ignite.igfs.IgfsMode.PROXY;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_COLOCATED_WRITES;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_LOG_BATCH_SIZE;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_LOG_DIR;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_LOG_ENABLED;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_PREFER_LOCAL_WRITES;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH;
+import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.parameter;
+import static org.apache.ignite.internal.processors.igfs.IgfsEx.IGFS_SCHEME;
+
+/**
+ * {@code IGFS} Hadoop 1.x file system driver over file system API. To use
+ * {@code IGFS} as Hadoop file system, you should configure this class
+ * in Hadoop's {@code core-site.xml} as follows:
+ * <pre name="code" class="xml">
+ *  &lt;property&gt;
+ *      &lt;name&gt;fs.default.name&lt;/name&gt;
+ *      &lt;value&gt;igfs:///&lt;/value&gt;
+ *  &lt;/property&gt;
+ *
+ *  &lt;property&gt;
+ *      &lt;name&gt;fs.igfs.impl&lt;/name&gt;
+ *      &lt;value&gt;org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem&lt;/value&gt;
+ *  &lt;/property&gt;
+ * </pre>
+ * You should also add Ignite JAR and all libraries to Hadoop classpath. To
+ * do this, add following lines to {@code conf/hadoop-env.sh} script in Hadoop
+ * distribution:
+ * <pre name="code" class="bash">
+ * export IGNITE_HOME=/path/to/Ignite/distribution
+ * export HADOOP_CLASSPATH=$IGNITE_HOME/ignite*.jar
+ *
+ * for f in $IGNITE_HOME/libs/*.jar; do
+ *  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f;
+ * done
+ * </pre>
+ * <h1 class="header">Data vs Client Nodes</h1>
+ * Hadoop needs to use its FileSystem remotely from client nodes as well as directly on
+ * data nodes. Client nodes are responsible for basic file system operations as well as
+ * accessing data nodes remotely. Usually, client nodes are started together
+ * with {@code job-submitter} or {@code job-scheduler} processes, while data nodes are usually
+ * started together with Hadoop {@code task-tracker} processes.
+ * <p>
+ * For sample client and data node configuration refer to {@code config/hadoop/default-config-client.xml}
+ * and {@code config/hadoop/default-config.xml} configuration files in Ignite installation.
+ */
+public class IgniteHadoopFileSystem extends FileSystem {
+    /** Internal property to indicate management connection. */
+    public static final String IGFS_MANAGEMENT = "fs.igfs.management.connection";
+
+    /** Empty array of file block locations. */
+    private static final BlockLocation[] EMPTY_BLOCK_LOCATIONS = new BlockLocation[0];
+
+    /** Empty array of file statuses. */
+    public static final FileStatus[] EMPTY_FILE_STATUS = new FileStatus[0];
+
+    /** Ensures that close routine is invoked at most once. */
+    private final AtomicBoolean closeGuard = new AtomicBoolean();
+
+    /** Grid remote client. */
+    private HadoopIgfsWrapper rmtClient;
+
+    /** Working directory. */
+    private Path workingDir;
+
+    /** Default replication factor. */
+    private short dfltReplication;
+
+    /** Base file system uri. */
+    @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
+    private URI uri;
+
+    /** Authority. */
+    private String uriAuthority;
+
+    /** Client logger. */
+    private IgfsLogger clientLog;
+
+    /** Secondary URI string. */
+    private URI secondaryUri;
+
+    /** The user name this file system was created on behalf of. */
+    private String user;
+
+    /** IGFS mode resolver. */
+    private IgfsModeResolver modeRslvr;
+
+    /** The secondary file system factory. */
+    private HadoopFileSystemFactory factory;
+
+    /** Management connection flag. */
+    private boolean mgmt;
+
+    /** Whether a custom "sequential reads before prefetch" value is provided. */
+    private boolean seqReadsBeforePrefetchOverride;
+
+    /** IGFS group block size. */
+    private long igfsGrpBlockSize;
+
+    /** Flag that controls whether file writes should be colocated. */
+    private boolean colocateFileWrites;
+
+    /** Prefer local writes. */
+    private boolean preferLocFileWrites;
+
+    /** Custom-provided sequential reads before prefetch. */
+    private int seqReadsBeforePrefetch;
+
+    /** {@inheritDoc} */
+    @Override public URI getUri() {
+        if (uri == null)
+            throw new IllegalStateException("URI is null (was IgniteHadoopFileSystem properly initialized?).");
+
+        return uri;
+    }
+
+    /**
+     * Enter busy state.
+     *
+     * @throws IOException If file system is stopped.
+     */
+    private void enterBusy() throws IOException {
+        if (closeGuard.get())
+            throw new IOException("File system is stopped.");
+    }
+
+    /**
+     * Leave busy state.
+     */
+    private void leaveBusy() {
+        // No-op.
+    }
+
+    /**
+     * Gets non-null user name as per the Hadoop file system viewpoint.
+     *
+     * @return The user name, never null.
+     * @throws IOException If the current user cannot be determined.
+     */
+    public static String getFsHadoopUser() throws IOException {
+        UserGroupInformation currUgi = UserGroupInformation.getCurrentUser();
+
+        String user = currUgi.getShortUserName();
+
+        user = IgfsUtils.fixUserName(user);
+
+        assert user != null;
+
+        return user;
+    }
+
+    /**
+     * Public setter that can be used by direct users of FS or Visor.
+     *
+     * @param colocateFileWrites Whether all ongoing file writes should be colocated.
+     */
+    @SuppressWarnings("UnusedDeclaration")
+    public void colocateFileWrites(boolean colocateFileWrites) {
+        this.colocateFileWrites = colocateFileWrites;
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("ConstantConditions")
+    @Override public void initialize(URI name, Configuration cfg) throws IOException {
+        enterBusy();
+
+        try {
+            if (rmtClient != null)
+                throw new IOException("File system is already initialized: " + rmtClient);
+
+            A.notNull(name, "name");
+            A.notNull(cfg, "cfg");
+
+            super.initialize(name, cfg);
+
+            setConf(cfg);
+
+            mgmt = cfg.getBoolean(IGFS_MANAGEMENT, false);
+
+            if (!IGFS_SCHEME.equals(name.getScheme()))
+                throw new IOException("Illegal file system URI [expected=" + IGFS_SCHEME +
+                    "://[name]/[optional_path], actual=" + name + ']');
+
+            uri = name;
+
+            uriAuthority = uri.getAuthority();
+
+            user = getFsHadoopUser();
+
+            // Override sequential reads before prefetch if needed.
+            seqReadsBeforePrefetch = parameter(cfg, PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, uriAuthority, 0);
+
+            if (seqReadsBeforePrefetch > 0)
+                seqReadsBeforePrefetchOverride = true;
+
+            // In Ignite, the replication factor is controlled by data cache affinity.
+            // We use replication factor to force the whole file to be stored on local node.
+            dfltReplication = (short)cfg.getInt("dfs.replication", 3);
+
+            // Get file colocation control flag.
+            colocateFileWrites = parameter(cfg, PARAM_IGFS_COLOCATED_WRITES, uriAuthority, false);
+            preferLocFileWrites = cfg.getBoolean(PARAM_IGFS_PREFER_LOCAL_WRITES, false);
+
+            // Get log directory.
+            String logDirCfg = parameter(cfg, PARAM_IGFS_LOG_DIR, uriAuthority, DFLT_IGFS_LOG_DIR);
+
+            File logDirFile = U.resolveIgnitePath(logDirCfg);
+
+            String logDir = logDirFile != null ? logDirFile.getAbsolutePath() : null;
+
+            rmtClient = new HadoopIgfsWrapper(uriAuthority, logDir, cfg, LOG, user);
+
+            // Handshake.
+            IgfsHandshakeResponse handshake = rmtClient.handshake(logDir);
+
+            igfsGrpBlockSize = handshake.blockSize();
+
+            IgfsPaths paths = handshake.secondaryPaths();
+
+            // Initialize client logger.
+            Boolean logEnabled = parameter(cfg, PARAM_IGFS_LOG_ENABLED, uriAuthority, false);
+
+            if (handshake.sampling() != null ? handshake.sampling() : logEnabled) {
+                // Initiate client logger.
+                if (logDir == null)
+                    throw new IOException("Failed to resolve log directory: " + logDirCfg);
+
+                Integer batchSize = parameter(cfg, PARAM_IGFS_LOG_BATCH_SIZE, uriAuthority, DFLT_IGFS_LOG_BATCH_SIZE);
+
+                clientLog = IgfsLogger.logger(uriAuthority, handshake.igfsName(), logDir, batchSize);
+            }
+            else
+                clientLog = IgfsLogger.disabledLogger();
+
+            try {
+                modeRslvr = new IgfsModeResolver(paths.defaultMode(), paths.pathModes());
+            }
+            catch (IgniteCheckedException ice) {
+                throw new IOException(ice);
+            }
+
+            boolean initSecondary = paths.defaultMode() == PROXY;
+
+            if (!initSecondary && paths.pathModes() != null && !paths.pathModes().isEmpty()) {
+                for (T2<IgfsPath, IgfsMode> pathMode : paths.pathModes()) {
+                    IgfsMode mode = pathMode.getValue();
+
+                    if (mode == PROXY) {
+                        initSecondary = true;
+
+                        break;
+                    }
+                }
+            }
+
+            if (initSecondary) {
+                try {
+                    factory = (HadoopFileSystemFactory) paths.getPayload(getClass().getClassLoader());
+                }
+                catch (IgniteCheckedException e) {
+                    throw new IOException("Failed to get secondary file system factory.", e);
+                }
+
+                if (factory == null)
+                    throw new IOException("Failed to get secondary file system factory (did you set " +
+                        IgniteHadoopIgfsSecondaryFileSystem.class.getName() + " as \"secondaryFileSystem\" in " +
+                        FileSystemConfiguration.class.getName() + "?)");
+
+                if (factory instanceof LifecycleAware)
+                    ((LifecycleAware) factory).start();
+
+                try {
+                    FileSystem secFs = factory.get(user);
+
+                    secondaryUri = secFs.getUri();
+
+                    A.ensure(secondaryUri != null, "Secondary file system uri should not be null.");
+                }
+                catch (IOException e) {
+                    if (!mgmt)
+                        throw new IOException("Failed to connect to the secondary file system: " + secondaryUri, e);
+                    else
+                        LOG.warn("Visor failed to create secondary file system (operations on paths with PROXY mode " +
+                            "will have no effect): " + e.getMessage());
+                }
+            }
+
+            // Set working directory to the home directory of the current FS user.
+            setWorkingDirectory(null);
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void checkPath(Path path) {
+        URI uri = path.toUri();
+
+        if (uri.isAbsolute()) {
+            if (!F.eq(uri.getScheme(), IGFS_SCHEME))
+                throw new InvalidPathException("Wrong path scheme [expected=" + IGFS_SCHEME + ", actual=" +
+                    uri.getScheme() + ']');
+
+            if (!F.eq(uri.getAuthority(), uriAuthority))
+                throw new InvalidPathException("Wrong path authority [expected=" + uriAuthority + ", actual=" +
+                    uri.getAuthority() + ']');
+        }
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("deprecation")
+    @Override public short getDefaultReplication() {
+        return dfltReplication;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void finalize() throws Throwable {
+        super.finalize();
+
+        close();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void close() throws IOException {
+        if (closeGuard.compareAndSet(false, true))
+            close0();
+    }
+
+    /**
+     * Closes file system.
+     *
+     * @throws IOException If failed.
+     */
+    private void close0() throws IOException {
+        if (LOG.isDebugEnabled())
+            LOG.debug("File system closed [uri=" + uri + ", endpoint=" + uriAuthority + ']');
+
+        if (rmtClient == null)
+            return;
+
+        super.close();
+
+        rmtClient.close(false);
+
+        if (clientLog.isLogEnabled())
+            clientLog.close();
+
+        if (factory instanceof LifecycleAware)
+            ((LifecycleAware) factory).stop();
+
+        // Reset initialized resources.
+        uri = null;
+        rmtClient = null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setTimes(Path p, long mtime, long atime) throws IOException {
+        enterBusy();
+
+        try {
+            A.notNull(p, "p");
+
+            if (mode(p) == PROXY) {
+                final FileSystem secondaryFs = secondaryFileSystem();
+
+                if (secondaryFs == null) {
+                    assert mgmt;
+
+                    // No-op for management connection.
+                    return;
+                }
+
+                secondaryFs.setTimes(toSecondary(p), mtime, atime);
+            }
+            else {
+                IgfsPath path = convert(p);
+
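+                // Note: the IGFS client takes access time before modification time, so the argument
+                // order is swapped relative to the Hadoop setTimes(path, mtime, atime) signature.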
+                rmtClient.setTimes(path, atime, mtime);
+            }
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setPermission(Path p, FsPermission perm) throws IOException {
+        enterBusy();
+
+        try {
+            A.notNull(p, "p");
+
+            if (mode(p) == PROXY) {
+                final FileSystem secondaryFs = secondaryFileSystem();
+
+                if (secondaryFs == null) {
+                    assert mgmt;
+
+                    // No-op for management connection.
+                    return;
+                }
+
+                secondaryFs.setPermission(toSecondary(p), perm);
+            }
+            else if (rmtClient.update(convert(p), permission(perm)) == null) {
+                throw new IOException("Failed to set file permission (file not found?)" +
+                    " [path=" + p + ", perm=" + perm + ']');
+            }
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setOwner(Path p, String username, String grpName) throws IOException {
+        A.notNull(p, "p");
+        A.notNull(username, "username");
+        A.notNull(grpName, "grpName");
+
+        enterBusy();
+
+        try {
+            if (mode(p) == PROXY) {
+                final FileSystem secondaryFs = secondaryFileSystem();
+
+                if (secondaryFs == null) {
+                    assert mgmt;
+
+                    // No-op for management connection.
+                    return;
+                }
+
+                secondaryFs.setOwner(toSecondary(p), username, grpName);
+            }
+            else if (rmtClient.update(convert(p), F.asMap(IgfsUtils.PROP_USER_NAME, username,
+                IgfsUtils.PROP_GROUP_NAME, grpName)) == null) {
+                throw new IOException("Failed to set file permission (file not found?)" +
+                    " [path=" + p + ", userName=" + username + ", groupName=" + grpName + ']');
+            }
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public FSDataInputStream open(Path f, int bufSize) throws IOException {
+        A.notNull(f, "f");
+
+        enterBusy();
+
+        try {
+            IgfsPath path = convert(f);
+            IgfsMode mode = mode(path);
+
+            if (mode == PROXY) {
+                final FileSystem secondaryFs = secondaryFileSystem();
+
+                if (secondaryFs == null) {
+                    assert mgmt;
+
+                    throw new IOException("Failed to open file (secondary file system is not initialized): " + f);
+                }
+
+                FSDataInputStream is = secondaryFs.open(toSecondary(f), bufSize);
+
+                if (clientLog.isLogEnabled()) {
+                    // At this point we do not know file size, so we perform additional request to remote FS to get it.
+                    FileStatus status = secondaryFs.getFileStatus(toSecondary(f));
+
+                    long size = status != null ? status.getLen() : -1;
+
+                    long logId = IgfsLogger.nextId();
+
+                    clientLog.logOpen(logId, path, PROXY, bufSize, size);
+
+                    return new FSDataInputStream(new HadoopIgfsProxyInputStream(is, clientLog, logId));
+                }
+                else
+                    return is;
+            }
+            else {
+                HadoopIgfsStreamDelegate stream = seqReadsBeforePrefetchOverride ?
+                    rmtClient.open(path, seqReadsBeforePrefetch) : rmtClient.open(path);
+
+                long logId = -1;
+
+                if (clientLog.isLogEnabled()) {
+                    logId = IgfsLogger.nextId();
+
+                    clientLog.logOpen(logId, path, mode, bufSize, stream.length());
+                }
+
+                if (LOG.isDebugEnabled())
+                    LOG.debug("Opening input stream [thread=" + Thread.currentThread().getName() + ", path=" + path +
+                        ", bufSize=" + bufSize + ']');
+
+                HadoopIgfsInputStream igfsIn = new HadoopIgfsInputStream(stream, stream.length(),
+                    bufSize, LOG, clientLog, logId);
+
+                if (LOG.isDebugEnabled())
+                    LOG.debug("Opened input stream [path=" + path + ", delegate=" + stream + ']');
+
+                return new FSDataInputStream(igfsIn);
+            }
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("deprecation")
+    @Override public FSDataOutputStream create(Path f, final FsPermission perm, boolean overwrite, int bufSize,
+        short replication, long blockSize, Progressable progress) throws IOException {
+        A.notNull(f, "f");
+
+        enterBusy();
+
+        OutputStream out = null;
+
+        try {
+            IgfsPath path = convert(f);
+            IgfsMode mode = mode(path);
+
+            if (LOG.isDebugEnabled())
+                LOG.debug("Opening output stream in create [thread=" + Thread.currentThread().getName() + "path=" +
+                    path + ", overwrite=" + overwrite + ", bufSize=" + bufSize + ']');
+
+            if (mode == PROXY) {
+                final FileSystem secondaryFs = secondaryFileSystem();
+
+                if (secondaryFs == null) {
+                    assert mgmt;
+
+                    throw new IOException("Failed to create file (secondary file system is not initialized): " + f);
+                }
+
+                FSDataOutputStream os =
+                    secondaryFs.create(toSecondary(f), perm, overwrite, bufSize, replication, blockSize, progress);
+
+                if (clientLog.isLogEnabled()) {
+                    long logId = IgfsLogger.nextId();
+
+                    clientLog.logCreate(logId, path, PROXY, overwrite, bufSize, replication, blockSize);
+
+                    return new FSDataOutputStream(new HadoopIgfsProxyOutputStream(os, clientLog, logId));
+                }
+                else
+                    return os;
+            }
+            else {
+                Map<String,String> propMap = permission(perm);
+
+                propMap.put(IgfsUtils.PROP_PREFER_LOCAL_WRITES, Boolean.toString(preferLocFileWrites));
+
+                // Create stream and close it in the 'finally' section if any sequential operation failed.
+                HadoopIgfsStreamDelegate stream = rmtClient.create(path, overwrite, colocateFileWrites,
+                    replication, blockSize, propMap);
+
+                assert stream != null;
+
+                long logId = -1;
+
+                if (clientLog.isLogEnabled()) {
+                    logId = IgfsLogger.nextId();
+
+                    clientLog.logCreate(logId, path, mode, overwrite, bufSize, replication, blockSize);
+                }
+
+                if (LOG.isDebugEnabled())
+                    LOG.debug("Opened output stream in create [path=" + path + ", delegate=" + stream + ']');
+
+                HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog,
+                    logId);
+
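+                // Enforce a buffer of at least 64KB so that small writes are batched before reaching IGFS.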
+                bufSize = Math.max(64 * 1024, bufSize);
+
+                out = new BufferedOutputStream(igfsOut, bufSize);
+
+                FSDataOutputStream res = new FSDataOutputStream(out, null, 0);
+
+                // Mark stream created successfully.
+                out = null;
+
+                return res;
+            }
+        }
+        finally {
+            // Close if failed during stream creation.
+            if (out != null)
+                U.closeQuiet(out);
+
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("deprecation")
+    @Override public FSDataOutputStream append(Path f, int bufSize, Progressable progress) throws IOException {
+        A.notNull(f, "f");
+
+        enterBusy();
+
+        try {
+            IgfsPath path = convert(f);
+            IgfsMode mode = mode(path);
+
+            if (LOG.isDebugEnabled())
+                LOG.debug("Opening output stream in append [thread=" + Thread.currentThread().getName() +
+                    ", path=" + path + ", bufSize=" + bufSize + ']');
+
+            if (mode == PROXY) {
+                final FileSystem secondaryFs = secondaryFileSystem();
+
+                if (secondaryFs == null) {
+                    assert mgmt;
+
+                    throw new IOException("Failed to append file (secondary file system is not initialized): " + f);
+                }
+
+                FSDataOutputStream os = secondaryFs.append(toSecondary(f), bufSize, progress);
+
+                if (clientLog.isLogEnabled()) {
+                    long logId = IgfsLogger.nextId();
+
+                    clientLog.logAppend(logId, path, PROXY, bufSize); // Don't have stream ID.
+
+                    return new FSDataOutputStream(new HadoopIgfsProxyOutputStream(os, clientLog, logId));
+                }
+                else
+                    return os;
+            }
+            else {
+                HadoopIgfsStreamDelegate stream = rmtClient.append(path, false, null);
+
+                assert stream != null;
+
+                long logId = -1;
+
+                if (clientLog.isLogEnabled()) {
+                    logId = IgfsLogger.nextId();
+
+                    clientLog.logAppend(logId, path, mode, bufSize);
+                }
+
+                if (LOG.isDebugEnabled())
+                    LOG.debug("Opened output stream in append [path=" + path + ", delegate=" + stream + ']');
+
+                HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog,
+                    logId);
+
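+                // As in create(), enforce a buffer of at least 64KB to batch small writes.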
+                bufSize = Math.max(64 * 1024, bufSize);
+
+                BufferedOutputStream out = new BufferedOutputStream(igfsOut, bufSize);
+
+                return new FSDataOutputStream(out, null, 0);
+            }
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("unchecked")
+    @Override public boolean rename(Path src, Path dst) throws IOException {
+        A.notNull(src, "src");
+        A.notNull(dst, "dst");
+
+        enterBusy();
+
+        try {
+            IgfsPath srcPath = convert(src);
+            IgfsPath dstPath = convert(dst);
+            IgfsMode mode = mode(srcPath);
+
+            if (mode == PROXY) {
+                final FileSystem secondaryFs = secondaryFileSystem();
+
+                if (secondaryFs == null) {
+                    assert mgmt;
+
+                    return false;
+                }
+
+                if (clientLog.isLogEnabled())
+                    clientLog.logRename(srcPath, PROXY, dstPath);
+
+                return secondaryFs.rename(toSecondary(src), toSecondary(dst));
+            }
+            else {
+                if (clientLog.isLogEnabled())
+                    clientLog.logRename(srcPath, mode, dstPath);
+
+                try {
+                    rmtClient.rename(srcPath, dstPath);
+                }
+                catch (IOException ioe) {
+                    // Log the exception before rethrowing since it may be ignored:
+                    LOG.warn("Failed to rename [srcPath=" + srcPath + ", dstPath=" + dstPath + ", mode=" + mode + ']',
+                        ioe);
+
+                    throw ioe;
+                }
+
+                return true;
+            }
+        }
+        catch (IOException e) {
+            // Intentionally ignore IGFS exceptions here to follow Hadoop contract.
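+            // A plain IOException without an IGFS cause signals a communication failure and is
+            // rethrown; failures originating inside IGFS are mapped to a 'false' return value.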
+            if (F.eq(IOException.class, e.getClass()) && (e.getCause() == null ||
+                !X.hasCause(e.getCause(), IgfsException.class)))
+                throw e;
+            else
+                return false;
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("deprecation")
+    @Override public boolean delete(Path f) throws IOException {
+        return delete(f, false);
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("unchecked")
+    @Override public boolean delete(Path f, boolean recursive) throws IOException {
+        A.notNull(f, "f");
+
+        enterBusy();
+
+        try {
+            IgfsPath path = convert(f);
+            IgfsMode mode = mode(path);
+
+            if (mode == PROXY) {
+                final FileSystem secondaryFs = secondaryFileSystem();
+
+                if (secondaryFs == null) {
+                    assert mgmt;
+
+                    return false;
+                }
+
+                if (clientLog.isLogEnabled())
+                    clientLog.logDelete(path, PROXY, recursive);
+
+                return secondaryFs.delete(toSecondary(f), recursive);
+            }
+            else {
+                // Will throw exception if delete failed.
+                boolean res = rmtClient.delete(path, recursive);
+
+                if (clientLog.isLogEnabled())
+                    clientLog.logDelete(path, mode, recursive);
+
+                return res;
+            }
+        }
+        catch (IOException e) {
+            // Intentionally ignore IGFS exceptions here to follow Hadoop contract.
+            if (F.eq(IOException.class, e.getClass()) && (e.getCause() == null ||
+                !X.hasCause(e.getCause(), IgfsException.class)))
+                throw e;
+            else
+                return false;
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public FileStatus[] listStatus(Path f) throws IOException {
+        A.notNull(f, "f");
+
+        enterBusy();
+
+        try {
+            IgfsPath path = convert(f);
+            IgfsMode mode = mode(path);
+
+            if (mode == PROXY) {
+                final FileSystem secondaryFs = secondaryFileSystem();
+
+                if (secondaryFs == null) {
+                    assert mgmt;
+
+                    return EMPTY_FILE_STATUS;
+                }
+
+                FileStatus[] arr = secondaryFs.listStatus(toSecondary(f));
+
+                if (arr == null)
+                    throw new FileNotFoundException("File " + f + " does not exist.");
+
+                for (int i = 0; i < arr.length; i++)
+                    arr[i] = toPrimary(arr[i]);
+
+                if (clientLog.isLogEnabled()) {
+                    String[] fileArr = new String[arr.length];
+
+                    for (int i = 0; i < arr.length; i++)
+                        fileArr[i] = arr[i].getPath().toString();
+
+                    clientLog.logListDirectory(path, PROXY, fileArr);
+                }
+
+                return arr;
+            }
+            else {
+                Collection<IgfsFile> list = rmtClient.listFiles(path);
+
+                if (list == null)
+                    throw new FileNotFoundException("File " + f + " does not exist.");
+
+                List<IgfsFile> files = new ArrayList<>(list);
+
+                FileStatus[] arr = new FileStatus[files.size()];
+
+                for (int i = 0; i < arr.length; i++)
+                    arr[i] = convert(files.get(i));
+
+                if (clientLog.isLogEnabled()) {
+                    String[] fileArr = new String[arr.length];
+
+                    for (int i = 0; i < arr.length; i++)
+                        fileArr[i] = arr[i].getPath().toString();
+
+                    clientLog.logListDirectory(path, mode, fileArr);
+                }
+
+                return arr;
+            }
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public Path getHomeDirectory() {
+        Path path = new Path("/user/" + user);
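+        // E.g. for user "ivan" this resolves to igfs://<authority>/user/ivan (user name illustrative).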
+
+        return path.makeQualified(getUri(), null);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setWorkingDirectory(Path newPath) {
+        try {
+            if (newPath == null) {
+                Path homeDir = getHomeDirectory();
+
+                FileSystem secondaryFs = secondaryFileSystem();
+
+                if (secondaryFs != null)
+                    secondaryFs.setWorkingDirectory(toSecondary(homeDir));
+
+                workingDir = homeDir;
+            }
+            else {
+                Path fixedNewPath = fixRelativePart(newPath);
+
+                String res = fixedNewPath.toUri().getPath();
+
+                if (!DFSUtil.isValidName(res))
+                    throw new IllegalArgumentException("Invalid DFS directory name " + res);
+
+                FileSystem secondaryFs = secondaryFileSystem();
+
+                if (secondaryFs != null)
+                    secondaryFs.setWorkingDirectory(toSecondary(fixedNewPath));
+
+                workingDir = fixedNewPath;
+            }
+        }
+        catch (IOException e) {
+            throw new RuntimeException("Failed to obtain secondary file system instance.", e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public Path getWorkingDirectory() {
+        return workingDir;
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("unchecked")
+    @Override public boolean mkdirs(Path f, FsPermission perm) throws IOException {
+        A.notNull(f, "f");
+
+        enterBusy();
+
+        try {
+            IgfsPath path = convert(f);
+            IgfsMode mode = mode(path);
+
+            if (mode == PROXY) {
+                final FileSystem secondaryFs = secondaryFileSystem();
+
+                if (secondaryFs == null) {
+                    assert mgmt;
+
+                    return false;
+                }
+
+                if (clientLog.isLogEnabled())
+                    clientLog.logMakeDirectory(path, PROXY);
+
+                return secondaryFs.mkdirs(toSecondary(f), perm);
+            }
+            else {
+                boolean mkdirRes = rmtClient.mkdirs(path, permission(perm));
+
+                if (clientLog.isLogEnabled())
+                    clientLog.logMakeDirectory(path, mode);
+
+                return mkdirRes;
+            }
+        }
+        catch (IOException e) {
+            // Intentionally ignore IGFS exceptions here to follow Hadoop contract.
+            if (F.eq(IOException.class, e.getClass()) && (e.getCause() == null ||
+                !X.hasCause(e.getCause(), IgfsException.class)))
+                throw e;
+            else
+                return false;
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public FileStatus getFileStatus(Path f) throws IOException {
+        A.notNull(f, "f");
+
+        enterBusy();
+
+        try {
+            if (mode(f) == PROXY) {
+                final FileSystem secondaryFs = secondaryFileSystem();
+
+                if (secondaryFs == null) {
+                    assert mgmt;
+
+                    throw new IOException("Failed to get file status (secondary file system is not initialized): " + f);
+                }
+
+                return toPrimary(secondaryFs.getFileStatus(toSecondary(f)));
+            }
+            else {
+                IgfsFile info = rmtClient.info(convert(f));
+
+                if (info == null)
+                    throw new FileNotFoundException("File not found: " + f);
+
+                return convert(info);
+            }
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public ContentSummary getContentSummary(Path f) throws IOException {
+        A.notNull(f, "f");
+
+        enterBusy();
+
+        try {
+            if (mode(f) == PROXY) {
+                final FileSystem secondaryFs = secondaryFileSystem();
+
+                if (secondaryFs == null) {
+                    assert mgmt;
+
+                    throw new IOException("Failed to get content summary (secondary file system is not initialized): " +
+                        f);
+                }
+
+                return secondaryFs.getContentSummary(toSecondary(f));
+            }
+            else {
+                IgfsPathSummary sum = rmtClient.contentSummary(convert(f));
+
+                return new ContentSummary(sum.totalLength(), sum.filesCount(), sum.directoriesCount(),
+                    -1, sum.totalLength(), rmtClient.fsStatus().spaceTotal());
+            }
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public BlockLocation[] getFileBlockLocations(FileStatus status, long start, long len) throws IOException {
+        A.notNull(status, "status");
+
+        enterBusy();
+
+        try {
+            IgfsPath path = convert(status.getPath());
+
+            if (mode(status.getPath()) == PROXY) {
+                final FileSystem secondaryFs = secondaryFileSystem();
+
+                if (secondaryFs == null) {
+                    assert mgmt;
+
+                    return EMPTY_BLOCK_LOCATIONS;
+                }
+
+                Path secPath = toSecondary(status.getPath());
+
+                return secondaryFs.getFileBlockLocations(secondaryFs.getFileStatus(secPath), start, len);
+            }
+            else {
+                long now = System.currentTimeMillis();
+
+                List<IgfsBlockLocation> affinity = new ArrayList<>(rmtClient.affinity(path, start, len));
+
+                BlockLocation[] arr = new BlockLocation[affinity.size()];
+
+                for (int i = 0; i < arr.length; i++)
+                    arr[i] = convert(affinity.get(i));
+
+                if (LOG.isDebugEnabled())
+                    LOG.debug("Fetched file locations [path=" + path + ", fetchTime=" +
+                        (System.currentTimeMillis() - now) + ", locations=" + Arrays.asList(arr) + ']');
+
+                return arr;
+            }
+        }
+        catch (FileNotFoundException ignored) {
+            return EMPTY_BLOCK_LOCATIONS;
+        }
+        finally {
+            leaveBusy();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("deprecation")
+    @Override public long getDefaultBlockSize() {
+        return igfsGrpBlockSize;
+    }
+
+    /**
+     * Resolve path mode.
+     *
+     * @param path HDFS path.
+     * @return Path mode.
+     */
+    public IgfsMode mode(Path path) {
+        return mode(convert(path));
+    }
+
+    /**
+     * Resolve path mode.
+     *
+     * @param path IGFS path.
+     * @return Path mode.
+     */
+    public IgfsMode mode(IgfsPath path) {
+        return modeRslvr.resolveMode(path);
+    }
+
+    /**
+     * @return {@code true} If secondary file system is initialized.
+     */
+    public boolean hasSecondaryFileSystem() {
+        return factory != null;
+    }
+
+    /**
+     * Convert the given path to path acceptable by the primary file system.
+     *
+     * @param path Path.
+     * @return Primary file system path.
+     */
+    private Path toPrimary(Path path) {
+        return convertPath(path, uri);
+    }
+
+    /**
+     * Convert the given path to path acceptable by the secondary file system.
+     *
+     * @param path Path.
+     * @return Secondary file system path.
+     */
+    private Path toSecondary(Path path) {
+        assert factory != null;
+        assert secondaryUri != null;
+
+        return convertPath(path, secondaryUri);
+    }
+
+    /**
+     * Convert path using the given new URI.
+     *
+     * @param path Old path.
+     * @param newUri New URI.
+     * @return New path.
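+     *
+     * <p>E.g. {@code igfs://igfs@localhost/dir/file} converted with the new URI
+     * {@code hdfs://nn:9000/} yields {@code hdfs://nn:9000/dir/file} (hosts are illustrative).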
+     */
+    private Path convertPath(Path path, URI newUri) {
+        assert newUri != null;
+
+        if (path != null) {
+            URI pathUri = path.toUri();
+
+            try {
+                return new Path(new URI(pathUri.getScheme() != null ? newUri.getScheme() : null,
+                    pathUri.getAuthority() != null ? newUri.getAuthority() : null, pathUri.getPath(), null, null));
+            }
+            catch (URISyntaxException e) {
+                throw new IgniteException("Failed to construct secondary file system path from the primary file " +
+                    "system path: " + path, e);
+            }
+        }
+        else
+            return null;
+    }
+
+    /**
+     * Convert a file status obtained from the secondary file system to a status of the primary file system.
+     *
+     * @param status Secondary file system status.
+     * @return Primary file system status.
+     */
+    @SuppressWarnings("deprecation")
+    private FileStatus toPrimary(FileStatus status) {
+        return status != null ? new FileStatus(status.getLen(), status.isDir(), status.getReplication(),
+            status.getBlockSize(), status.getModificationTime(), status.getAccessTime(), status.getPermission(),
+            status.getOwner(), status.getGroup(), toPrimary(status.getPath())) : null;
+    }
+
+    /**
+     * Convert IGFS path into Hadoop path.
+     *
+     * @param path IGFS path.
+     * @return Hadoop path.
+     */
+    private Path convert(IgfsPath path) {
+        return new Path(IGFS_SCHEME, uriAuthority, path.toString());
+    }
+
+    /**
+     * Convert Hadoop path into IGFS path.
+     *
+     * @param path Hadoop path.
+     * @return IGFS path.
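+     *
+     * <p>A relative path is resolved against the working directory, e.g. {@code data/part-0} with
+     * working directory {@code /user/ivan} becomes {@code /user/ivan/data/part-0} (names illustrative).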
+     */
+    @Nullable private IgfsPath convert(@Nullable Path path) {
+        if (path == null)
+            return null;
+
+        return path.isAbsolute() ? new IgfsPath(path.toUri().getPath()) :
+            new IgfsPath(convert(workingDir), path.toUri().getPath());
+    }
+
+    /**
+     * Convert IGFS affinity block location into Hadoop affinity block location.
+     *
+     * @param block IGFS affinity block location.
+     * @return Hadoop affinity block location.
+     */
+    private BlockLocation convert(IgfsBlockLocation block) {
+        Collection<String> names = block.names();
+        Collection<String> hosts = block.hosts();
+
+        return new BlockLocation(
+            names.toArray(new String[names.size()]) /* hostname:portNumber of data nodes */,
+            hosts.toArray(new String[hosts.size()]) /* hostnames of data nodes */,
+            block.start(), block.length()
+        ) {
+            @Override public String toString() {
+                try {
+                    return "BlockLocation [offset=" + getOffset() + ", length=" + getLength() +
+                        ", hosts=" + Arrays.asList(getHosts()) + ", names=" + Arrays.asList(getNames()) + ']';
+                }
+                catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
+            }
+        };
+    }
+
+    /**
+     * Convert IGFS file information into Hadoop file status.
+     *
+     * @param file IGFS file information.
+     * @return Hadoop file status.
+     */
+    @SuppressWarnings("deprecation")
+    private FileStatus convert(IgfsFile file) {
+        return new FileStatus(
+            file.length(),
+            file.isDirectory(),
+            getDefaultReplication(),
+            file.groupBlockSize(),
+            file.modificationTime(),
+            file.accessTime(),
+            permission(file),
+            file.property(IgfsUtils.PROP_USER_NAME, user),
+            file.property(IgfsUtils.PROP_GROUP_NAME, "users"),
+            convert(file.path())) {
+            @Override public String toString() {
+                return "FileStatus [path=" + getPath() + ", isDir=" + isDir() + ", len=" + getLen() +
+                    ", mtime=" + getModificationTime() + ", atime=" + getAccessTime() + ']';
+            }
+        };
+    }
+
+    /**
+     * Convert Hadoop permission into IGFS file attribute.
+     *
+     * @param perm Hadoop permission.
+     * @return IGFS attributes.
+     */
+    private Map<String, String> permission(FsPermission perm) {
+        if (perm == null)
+            perm = FsPermission.getDefault();
+
+        return F.asMap(IgfsUtils.PROP_PERMISSION, toString(perm));
+    }
+
+    /**
+     * Converts a permission to its four-digit octal string form.
+     *
+     * @param perm Permission.
+     * @return Octal permission string, e.g. {@code "0755"} for {@code rwxr-xr-x}.
+     */
+    private static String toString(FsPermission perm) {
+        return String.format("%04o", perm.toShort());
+    }
+
+    /**
+     * Convert IGFS file attributes into Hadoop permission.
+     *
+     * @param file File info.
+     * @return Hadoop permission.
+     */
+    private FsPermission permission(IgfsFile file) {
+        String perm = file.property(IgfsUtils.PROP_PERMISSION, null);
+
+        if (perm == null)
+            return FsPermission.getDefault();
+
+        try {
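+            // E.g. the stored property value "0755" parses back to rwxr-xr-x.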
+            return new FsPermission((short)Integer.parseInt(perm, 8));
+        }
+        catch (NumberFormatException ignore) {
+            return FsPermission.getDefault();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(IgniteHadoopFileSystem.class, this);
+    }
+
+    /**
+     * Returns the user name this file system was created on behalf of.
+     *
+     * @return The user name.
+     */
+    public String user() {
+        return user;
+    }
+
+    /**
+     * Gets a cached {@link FileSystem} for the secondary file system or creates a new one.
+     *
+     * @return The secondary file system, or {@code null} if none is configured.
+     * @throws IOException If the secondary file system could not be obtained.
+     */
+    private @Nullable FileSystem secondaryFileSystem() throws IOException {
+        if (factory == null)
+            return null;
+
+        return factory.get(user);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/v1/package-info.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/v1/package-info.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/v1/package-info.java
new file mode 100644
index 0000000..60e62ca
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/v1/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <!-- Package description. -->
+ * Contains Ignite Hadoop 1.x <code>FileSystem</code> implementation.
+ */
+package org.apache.ignite.hadoop.fs.v1;
\ No newline at end of file


[26/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/books/huckleberry-finn.txt
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/books/huckleberry-finn.txt b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/books/huckleberry-finn.txt
new file mode 100644
index 0000000..3af8c6b
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/books/huckleberry-finn.txt
@@ -0,0 +1,11733 @@
+The Project Gutenberg EBook of Adventures of Huckleberry Finn, Complete
+by Mark Twain (Samuel Clemens)
+
+This eBook is for the use of anyone anywhere at no cost and with
+almost no restrictions whatsoever.  You may copy it, give it away or
+re-use it under the terms of the Project Gutenberg License included
+with this eBook or online at www.gutenberg.net
+
+
+Title: Adventures of Huckleberry Finn, Complete
+
+Author: Mark Twain (Samuel Clemens)
+
+Release Date: August 20, 2006 [EBook #76]
+[This file last updated May 3, 2011]
+
+Language: English
+
+
+*** START OF THIS PROJECT GUTENBERG EBOOK HUCKLEBERRY FINN ***
+
+
+
+
+Produced by David Widger. Previous editions produced by Ron Burkey
+and Internet Wiretap
+
+
+
+
+
+ADVENTURES OF HUCKLEBERRY FINN
+
+By Mark Twain
+
+
+
+NOTICE
+
+PERSONS attempting to find a motive in this narrative will be prosecuted;
+persons attempting to find a moral in it will be banished; persons
+attempting to find a plot in it will be shot.
+
+BY ORDER OF THE AUTHOR, Per G.G., Chief of Ordnance.
+
+
+
+
+EXPLANATORY
+
+IN this book a number of dialects are used, to wit:  the Missouri negro
+dialect; the extremest form of the backwoods Southwestern dialect; the
+ordinary "Pike County" dialect; and four modified varieties of this last.
+The shadings have not been done in a haphazard fashion, or by guesswork;
+but painstakingly, and with the trustworthy guidance and support of
+personal familiarity with these several forms of speech.
+
+I make this explanation for the reason that without it many readers would
+suppose that all these characters were trying to talk alike and not
+succeeding.
+
+THE AUTHOR.
+
+
+
+
+
+ADVENTURES OF HUCKLEBERRY FINN
+
+Scene:  The Mississippi Valley Time:  Forty to fifty years ago
+
+
+
+CHAPTER I.
+
+YOU don't know about me without you have read a book by the name of The
+Adventures of Tom Sawyer; but that ain't no matter.  That book was made
+by Mr. Mark Twain, and he told the truth, mainly.  There was things which
+he stretched, but mainly he told the truth.  That is nothing.  I never
+seen anybody but lied one time or another, without it was Aunt Polly, or
+the widow, or maybe Mary.  Aunt Polly--Tom's Aunt Polly, she is--and
+Mary, and the Widow Douglas is all told about in that book, which is
+mostly a true book, with some stretchers, as I said before.
+
+Now the way that the book winds up is this:  Tom and me found the money
+that the robbers hid in the cave, and it made us rich.  We got six
+thousand dollars apiece--all gold.  It was an awful sight of money when
+it was piled up.  Well, Judge Thatcher he took it and put it out at
+interest, and it fetched us a dollar a day apiece all the year round
+--more than a body could tell what to do with.  The Widow Douglas she took
+me for her son, and allowed she would sivilize me; but it was rough
+living in the house all the time, considering how dismal regular and
+decent the widow was in all her ways; and so when I couldn't stand it no
+longer I lit out.  I got into my old rags and my sugar-hogshead again,
+and was free and satisfied.  But Tom Sawyer he hunted me up and said he
+was going to start a band of robbers, and I might join if I would go back
+to the widow and be respectable.  So I went back.
+
+The widow she cried over me, and called me a poor lost lamb, and she
+called me a lot of other names, too, but she never meant no harm by it.
+She put me in them new clothes again, and I couldn't do nothing but sweat
+and sweat, and feel all cramped up.  Well, then, the old thing commenced
+again.  The widow rung a bell for supper, and you had to come to time.
+When you got to the table you couldn't go right to eating, but you had to
+wait for the widow to tuck down her head and grumble a little over the
+victuals, though there warn't really anything the matter with them,--that
+is, nothing only everything was cooked by itself.  In a barrel of odds
+and ends it is different; things get mixed up, and the juice kind of
+swaps around, and the things go better.
+
+After supper she got out her book and learned me about Moses and the
+Bulrushers, and I was in a sweat to find out all about him; but by and by
+she let it out that Moses had been dead a considerable long time; so then
+I didn't care no more about him, because I don't take no stock in dead
+people.
+
+Pretty soon I wanted to smoke, and asked the widow to let me.  But she
+wouldn't.  She said it was a mean practice and wasn't clean, and I must
+try to not do it any more.  That is just the way with some people.  They
+get down on a thing when they don't know nothing about it.  Here she was
+a-bothering about Moses, which was no kin to her, and no use to anybody,
+being gone, you see, yet finding a power of fault with me for doing a
+thing that had some good in it.  And she took snuff, too; of course that
+was all right, because she done it herself.
+
+Her sister, Miss Watson, a tolerable slim old maid, with goggles on,
+had just come to live with her, and took a set at me now with a
+spelling-book. She worked me middling hard for about an hour, and then
+the widow made her ease up.  I couldn't stood it much longer.  Then for
+an hour it was deadly dull, and I was fidgety.  Miss Watson would say,
+"Don't put your feet up there, Huckleberry;" and "Don't scrunch up like
+that, Huckleberry--set up straight;" and pretty soon she would say,
+"Don't gap and stretch like that, Huckleberry--why don't you try to
+behave?"  Then she told me all about the bad place, and I said I wished I
+was there. She got mad then, but I didn't mean no harm.  All I wanted was
+to go somewheres; all I wanted was a change, I warn't particular.  She
+said it was wicked to say what I said; said she wouldn't say it for the
+whole world; she was going to live so as to go to the good place.  Well,
+I couldn't see no advantage in going where she was going, so I made up my
+mind I wouldn't try for it.  But I never said so, because it would only
+make trouble, and wouldn't do no good.
+
+Now she had got a start, and she went on and told me all about the good
+place.  She said all a body would have to do there was to go around all
+day long with a harp and sing, forever and ever.  So I didn't think much
+of it. But I never said so.  I asked her if she reckoned Tom Sawyer would
+go there, and she said not by a considerable sight.  I was glad about
+that, because I wanted him and me to be together.
+
+Miss Watson she kept pecking at me, and it got tiresome and lonesome.  By
+and by they fetched the niggers in and had prayers, and then everybody
+was off to bed.  I went up to my room with a piece of candle, and put it
+on the table.  Then I set down in a chair by the window and tried to
+think of something cheerful, but it warn't no use.  I felt so lonesome I
+most wished I was dead.  The stars were shining, and the leaves rustled
+in the woods ever so mournful; and I heard an owl, away off, who-whooing
+about somebody that was dead, and a whippowill and a dog crying about
+somebody that was going to die; and the wind was trying to whisper
+something to me, and I couldn't make out what it was, and so it made the
+cold shivers run over me. Then away out in the woods I heard that kind of
+a sound that a ghost makes when it wants to tell about something that's
+on its mind and can't make itself understood, and so can't rest easy in
+its grave, and has to go about that way every night grieving.  I got so
+down-hearted and scared I did wish I had some company.  Pretty soon a
+spider went crawling up my shoulder, and I flipped it off and it lit in
+the candle; and before I could budge it was all shriveled up.  I didn't
+need anybody to tell me that that was an awful bad sign and would fetch
+me some bad luck, so I was scared and most shook the clothes off of me.
+I got up and turned around in my tracks three times and crossed my breast
+every time; and then I tied up a little lock of my hair with a thread to
+keep witches away.  But I hadn't no confidence.  You do that when you've
+lost a horseshoe that you've found, instead of nailing it up over the
+door, but I hadn't ever heard anybody say it was any way to keep off bad
+luck when you'd killed a spider.
+
+I set down again, a-shaking all over, and got out my pipe for a smoke;
+for the house was all as still as death now, and so the widow wouldn't
+know. Well, after a long time I heard the clock away off in the town go
+boom--boom--boom--twelve licks; and all still again--stiller than ever.
+Pretty soon I heard a twig snap down in the dark amongst the trees
+--something was a stirring.  I set still and listened.  Directly I could
+just barely hear a "me-yow! me-yow!" down there.  That was good!  Says I,
+"me-yow! me-yow!" as soft as I could, and then I put out the light and
+scrambled out of the window on to the shed.  Then I slipped down to the
+ground and crawled in among the trees, and, sure enough, there was Tom
+Sawyer waiting for me.
+
+
+
+
+CHAPTER II.
+
+WE went tiptoeing along a path amongst the trees back towards the end of
+the widow's garden, stooping down so as the branches wouldn't scrape our
+heads. When we was passing by the kitchen I fell over a root and made a
+noise.  We scrouched down and laid still.  Miss Watson's big nigger,
+named Jim, was setting in the kitchen door; we could see him pretty
+clear, because there was a light behind him.  He got up and stretched his
+neck out about a minute, listening.  Then he says:
+
+"Who dah?"
+
+He listened some more; then he come tiptoeing down and stood right
+between us; we could a touched him, nearly.  Well, likely it was minutes
+and minutes that there warn't a sound, and we all there so close
+together.  There was a place on my ankle that got to itching, but I
+dasn't scratch it; and then my ear begun to itch; and next my back, right
+between my shoulders.  Seemed like I'd die if I couldn't scratch.  Well,
+I've noticed that thing plenty times since.  If you are with the quality,
+or at a funeral, or trying to go to sleep when you ain't sleepy--if you
+are anywheres where it won't do for you to scratch, why you will itch all
+over in upwards of a thousand places. Pretty soon Jim says:
+
+"Say, who is you?  Whar is you?  Dog my cats ef I didn' hear sumf'n.
+Well, I know what I's gwyne to do:  I's gwyne to set down here and listen
+tell I hears it agin."
+
+So he set down on the ground betwixt me and Tom.  He leaned his back up
+against a tree, and stretched his legs out till one of them most touched
+one of mine.  My nose begun to itch.  It itched till the tears come into
+my eyes.  But I dasn't scratch.  Then it begun to itch on the inside.
+Next I got to itching underneath.  I didn't know how I was going to set
+still. This miserableness went on as much as six or seven minutes; but it
+seemed a sight longer than that.  I was itching in eleven different
+places now.  I reckoned I couldn't stand it more'n a minute longer, but I
+set my teeth hard and got ready to try.  Just then Jim begun to breathe
+heavy; next he begun to snore--and then I was pretty soon comfortable
+again.
+
+Tom he made a sign to me--kind of a little noise with his mouth--and we
+went creeping away on our hands and knees.  When we was ten foot off Tom
+whispered to me, and wanted to tie Jim to the tree for fun.  But I said
+no; he might wake and make a disturbance, and then they'd find out I
+warn't in. Then Tom said he hadn't got candles enough, and he would slip
+in the kitchen and get some more.  I didn't want him to try.  I said Jim
+might wake up and come.  But Tom wanted to resk it; so we slid in there
+and got three candles, and Tom laid five cents on the table for pay.
+Then we got out, and I was in a sweat to get away; but nothing would do
+Tom but he must crawl to where Jim was, on his hands and knees, and play
+something on him.  I waited, and it seemed a good while, everything was
+so still and lonesome.
+
+As soon as Tom was back we cut along the path, around the garden fence,
+and by and by fetched up on the steep top of the hill the other side of
+the house.  Tom said he slipped Jim's hat off of his head and hung it on
+a limb right over him, and Jim stirred a little, but he didn't wake.
+Afterwards Jim said the witches be witched him and put him in a trance,
+and rode him all over the State, and then set him under the trees again,
+and hung his hat on a limb to show who done it.  And next time Jim told
+it he said they rode him down to New Orleans; and, after that, every time
+he told it he spread it more and more, till by and by he said they rode
+him all over the world, and tired him most to death, and his back was all
+over saddle-boils.  Jim was monstrous proud about it, and he got so he
+wouldn't hardly notice the other niggers.  Niggers would come miles to
+hear Jim tell about it, and he was more looked up to than any nigger in
+that country.  Strange niggers would stand with their mouths open and
+look him all over, same as if he was a wonder.  Niggers is always talking
+about witches in the dark by the kitchen fire; but whenever one was
+talking and letting on to know all about such things, Jim would happen in
+and say, "Hm!  What you know 'bout witches?" and that nigger was corked
+up and had to take a back seat.  Jim always kept that five-center piece
+round his neck with a string, and said it was a charm the devil give to
+him with his own hands, and told him he could cure anybody with it and
+fetch witches whenever he wanted to just by saying something to it; but
+he never told what it was he said to it.  Niggers would come from all
+around there and give Jim anything they had, just for a sight of that
+five-center piece; but they wouldn't touch it, because the devil had had
+his hands on it.  Jim was most ruined for a servant, because he got stuck
+up on account of having seen the devil and been rode by witches.
+
+Well, when Tom and me got to the edge of the hilltop we looked away down
+into the village and could see three or four lights twinkling, where
+there was sick folks, maybe; and the stars over us was sparkling ever so
+fine; and down by the village was the river, a whole mile broad, and
+awful still and grand.  We went down the hill and found Jo Harper and Ben
+Rogers, and two or three more of the boys, hid in the old tanyard.  So we
+unhitched a skiff and pulled down the river two mile and a half, to the
+big scar on the hillside, and went ashore.
+
+We went to a clump of bushes, and Tom made everybody swear to keep the
+secret, and then showed them a hole in the hill, right in the thickest
+part of the bushes.  Then we lit the candles, and crawled in on our hands
+and knees.  We went about two hundred yards, and then the cave opened up.
+Tom poked about amongst the passages, and pretty soon ducked under a wall
+where you wouldn't a noticed that there was a hole.  We went along a
+narrow place and got into a kind of room, all damp and sweaty and cold,
+and there we stopped.  Tom says:
+
+"Now, we'll start this band of robbers and call it Tom Sawyer's Gang.
+Everybody that wants to join has got to take an oath, and write his name
+in blood."
+
+Everybody was willing.  So Tom got out a sheet of paper that he had wrote
+the oath on, and read it.  It swore every boy to stick to the band, and
+never tell any of the secrets; and if anybody done anything to any boy in
+the band, whichever boy was ordered to kill that person and his family
+must do it, and he mustn't eat and he mustn't sleep till he had killed
+them and hacked a cross in their breasts, which was the sign of the band.
+And nobody that didn't belong to the band could use that mark, and if he
+did he must be sued; and if he done it again he must be killed.  And if
+anybody that belonged to the band told the secrets, he must have his
+throat cut, and then have his carcass burnt up and the ashes scattered
+all around, and his name blotted off of the list with blood and never
+mentioned again by the gang, but have a curse put on it and be forgot
+forever.
+
+Everybody said it was a real beautiful oath, and asked Tom if he got it
+out of his own head.  He said, some of it, but the rest was out of
+pirate-books and robber-books, and every gang that was high-toned had it.
+
+Some thought it would be good to kill the FAMILIES of boys that told the
+secrets.  Tom said it was a good idea, so he took a pencil and wrote it
+in. Then Ben Rogers says:
+
+"Here's Huck Finn, he hain't got no family; what you going to do 'bout
+him?"
+
+"Well, hain't he got a father?" says Tom Sawyer.
+
+"Yes, he's got a father, but you can't never find him these days.  He
+used to lay drunk with the hogs in the tanyard, but he hain't been seen
+in these parts for a year or more."
+
+They talked it over, and they was going to rule me out, because they said
+every boy must have a family or somebody to kill, or else it wouldn't be
+fair and square for the others.  Well, nobody could think of anything to
+do--everybody was stumped, and set still.  I was most ready to cry; but
+all at once I thought of a way, and so I offered them Miss Watson--they
+could kill her.  Everybody said:
+
+"Oh, she'll do.  That's all right.  Huck can come in."
+
+Then they all stuck a pin in their fingers to get blood to sign with, and
+I made my mark on the paper.
+
+"Now," says Ben Rogers, "what's the line of business of this Gang?"
+
+"Nothing only robbery and murder," Tom said.
+
+"But who are we going to rob?--houses, or cattle, or--"
+
+"Stuff! stealing cattle and such things ain't robbery; it's burglary,"
+says Tom Sawyer.  "We ain't burglars.  That ain't no sort of style.  We
+are highwaymen.  We stop stages and carriages on the road, with masks on,
+and kill the people and take their watches and money."
+
+"Must we always kill the people?"
+
+"Oh, certainly.  It's best.  Some authorities think different, but mostly
+it's considered best to kill them--except some that you bring to the cave
+here, and keep them till they're ransomed."
+
+"Ransomed?  What's that?"
+
+"I don't know.  But that's what they do.  I've seen it in books; and so
+of course that's what we've got to do."
+
+"But how can we do it if we don't know what it is?"
+
+"Why, blame it all, we've GOT to do it.  Don't I tell you it's in the
+books?  Do you want to go to doing different from what's in the books,
+and get things all muddled up?"
+
+"Oh, that's all very fine to SAY, Tom Sawyer, but how in the nation are
+these fellows going to be ransomed if we don't know how to do it to them?
+--that's the thing I want to get at.  Now, what do you reckon it is?"
+
+"Well, I don't know.  But per'aps if we keep them till they're ransomed,
+it means that we keep them till they're dead."
+
+"Now, that's something LIKE.  That'll answer.  Why couldn't you said that
+before?  We'll keep them till they're ransomed to death; and a bothersome
+lot they'll be, too--eating up everything, and always trying to get
+loose."
+
+"How you talk, Ben Rogers.  How can they get loose when there's a guard
+over them, ready to shoot them down if they move a peg?"
+
+"A guard!  Well, that IS good.  So somebody's got to set up all night and
+never get any sleep, just so as to watch them.  I think that's
+foolishness. Why can't a body take a club and ransom them as soon as they
+get here?"
+
+"Because it ain't in the books so--that's why.  Now, Ben Rogers, do you
+want to do things regular, or don't you?--that's the idea.  Don't you
+reckon that the people that made the books knows what's the correct thing
+to do?  Do you reckon YOU can learn 'em anything?  Not by a good deal.
+No, sir, we'll just go on and ransom them in the regular way."
+
+"All right.  I don't mind; but I say it's a fool way, anyhow.  Say, do we
+kill the women, too?"
+
+"Well, Ben Rogers, if I was as ignorant as you I wouldn't let on.  Kill
+the women?  No; nobody ever saw anything in the books like that.  You
+fetch them to the cave, and you're always as polite as pie to them; and
+by and by they fall in love with you, and never want to go home any
+more."
+
+"Well, if that's the way I'm agreed, but I don't take no stock in it.
+Mighty soon we'll have the cave so cluttered up with women, and fellows
+waiting to be ransomed, that there won't be no place for the robbers.
+But go ahead, I ain't got nothing to say."
+
+Little Tommy Barnes was asleep now, and when they waked him up he was
+scared, and cried, and said he wanted to go home to his ma, and didn't
+want to be a robber any more.
+
+So they all made fun of him, and called him cry-baby, and that made him
+mad, and he said he would go straight and tell all the secrets.  But Tom
+give him five cents to keep quiet, and said we would all go home and meet
+next week, and rob somebody and kill some people.
+
+Ben Rogers said he couldn't get out much, only Sundays, and so he wanted
+to begin next Sunday; but all the boys said it would be wicked to do it
+on Sunday, and that settled the thing.  They agreed to get together and
+fix a day as soon as they could, and then we elected Tom Sawyer first
+captain and Jo Harper second captain of the Gang, and so started home.
+
+I clumb up the shed and crept into my window just before day was
+breaking. My new clothes was all greased up and clayey, and I was
+dog-tired.
+
+
+
+
+CHAPTER III.
+
+WELL, I got a good going-over in the morning from old Miss Watson on
+account of my clothes; but the widow she didn't scold, but only cleaned
+off the grease and clay, and looked so sorry that I thought I would
+behave awhile if I could.  Then Miss Watson she took me in the closet and
+prayed, but nothing come of it.  She told me to pray every day, and
+whatever I asked for I would get it.  But it warn't so.  I tried it.
+Once I got a fish-line, but no hooks.  It warn't any good to me without
+hooks.  I tried for the hooks three or four times, but somehow I couldn't
+make it work.  By and by, one day, I asked Miss Watson to try for me, but
+she said I was a fool.  She never told me why, and I couldn't make it out
+no way.
+
+I set down one time back in the woods, and had a long think about it.  I
+says to myself, if a body can get anything they pray for, why don't
+Deacon Winn get back the money he lost on pork?  Why can't the widow get
+back her silver snuffbox that was stole?  Why can't Miss Watson fat up?
+No, says I to my self, there ain't nothing in it.  I went and told the
+widow about it, and she said the thing a body could get by praying for it
+was "spiritual gifts."  This was too many for me, but she told me what
+she meant--I must help other people, and do everything I could for other
+people, and look out for them all the time, and never think about myself.
+This was including Miss Watson, as I took it.  I went out in the woods
+and turned it over in my mind a long time, but I couldn't see no
+advantage about it--except for the other people; so at last I reckoned I
+wouldn't worry about it any more, but just let it go.  Sometimes the
+widow would take me one side and talk about Providence in a way to make a
+body's mouth water; but maybe next day Miss Watson would take hold and
+knock it all down again.  I judged I could see that there was two
+Providences, and a poor chap would stand considerable show with the
+widow's Providence, but if Miss Watson's got him there warn't no help for
+him any more.  I thought it all out, and reckoned I would belong to the
+widow's if he wanted me, though I couldn't make out how he was a-going to
+be any better off then than what he was before, seeing I was so ignorant,
+and so kind of low-down and ornery.
+
+Pap he hadn't been seen for more than a year, and that was comfortable
+for me; I didn't want to see him no more.  He used to always whale me
+when he was sober and could get his hands on me; though I used to take to
+the woods most of the time when he was around.  Well, about this time he
+was found in the river drownded, about twelve mile above town, so people
+said.  They judged it was him, anyway; said this drownded man was just
+his size, and was ragged, and had uncommon long hair, which was all like
+pap; but they couldn't make nothing out of the face, because it had been
+in the water so long it warn't much like a face at all.  They said he was
+floating on his back in the water.  They took him and buried him on the
+bank.  But I warn't comfortable long, because I happened to think of
+something.  I knowed mighty well that a drownded man don't float on his
+back, but on his face.  So I knowed, then, that this warn't pap, but a
+woman dressed up in a man's clothes.  So I was uncomfortable again.  I
+judged the old man would turn up again by and by, though I wished he
+wouldn't.
+
+We played robber now and then about a month, and then I resigned.  All
+the boys did.  We hadn't robbed nobody, hadn't killed any people, but
+only just pretended.  We used to hop out of the woods and go charging
+down on hog-drivers and women in carts taking garden stuff to market, but
+we never hived any of them.  Tom Sawyer called the hogs "ingots," and he
+called the turnips and stuff "julery," and we would go to the cave and
+powwow over what we had done, and how many people we had killed and
+marked.  But I couldn't see no profit in it.  One time Tom sent a boy to
+run about town with a blazing stick, which he called a slogan (which was
+the sign for the Gang to get together), and then he said he had got
+secret news by his spies that next day a whole parcel of Spanish
+merchants and rich A-rabs was going to camp in Cave Hollow with two
+hundred elephants, and six hundred camels, and over a thousand "sumter"
+mules, all loaded down with di'monds, and they didn't have only a guard
+of four hundred soldiers, and so we would lay in ambuscade, as he called
+it, and kill the lot and scoop the things.  He said we must slick up our
+swords and guns, and get ready.  He never could go after even a
+turnip-cart but he must have the swords and guns all scoured up for it,
+though they was only lath and broomsticks, and you might scour at them
+till you rotted, and then they warn't worth a mouthful of ashes more than
+what they was before.  I didn't believe we could lick such a crowd of
+Spaniards and A-rabs, but I wanted to see the camels and elephants, so I
+was on hand next day, Saturday, in the ambuscade; and when we got the
+word we rushed out of the woods and down the hill.  But there warn't no
+Spaniards and A-rabs, and there warn't no camels nor no elephants.  It
+warn't anything but a Sunday-school picnic, and only a primer-class at
+that.  We busted it up, and chased the children up the hollow; but we
+never got anything but some doughnuts and jam, though Ben Rogers got a
+rag doll, and Jo Harper got a hymn-book and a tract; and then the teacher
+charged in, and made us drop everything and cut.  I didn't see no
+di'monds, and I told Tom Sawyer so.  He said there was loads of them
+there, anyway; and he said there was A-rabs there, too, and elephants and
+things.  I said, why couldn't we see them, then?  He said if I warn't so
+ignorant, but had read a book called Don Quixote, I would know without
+asking.  He said it was all done by enchantment.  He said there was
+hundreds of soldiers there, and elephants and treasure, and so on, but we
+had enemies which he called magicians; and they had turned the whole
+thing into an infant Sunday-school, just out of spite.  I said, all
+right; then the thing for us to do was to go for the magicians.  Tom
+Sawyer said I was a numskull.
+
+"Why," said he, "a magician could call up a lot of genies, and they would
+hash you up like nothing before you could say Jack Robinson.  They are as
+tall as a tree and as big around as a church."
+
+"Well," I says, "s'pose we got some genies to help US--can't we lick the
+other crowd then?"
+
+"How you going to get them?"
+
+"I don't know.  How do THEY get them?"
+
+"Why, they rub an old tin lamp or an iron ring, and then the genies come
+tearing in, with the thunder and lightning a-ripping around and the smoke
+a-rolling, and everything they're told to do they up and do it.  They
+don't think nothing of pulling a shot-tower up by the roots, and belting
+a Sunday-school superintendent over the head with it--or any other man."
+
+"Who makes them tear around so?"
+
+"Why, whoever rubs the lamp or the ring.  They belong to whoever rubs the
+lamp or the ring, and they've got to do whatever he says.  If he tells
+them to build a palace forty miles long out of di'monds, and fill it full
+of chewing-gum, or whatever you want, and fetch an emperor's daughter
+from China for you to marry, they've got to do it--and they've got to do
+it before sun-up next morning, too.  And more:  they've got to waltz that
+palace around over the country wherever you want it, you understand."
+
+"Well," says I, "I think they are a pack of flat-heads for not keeping
+the palace themselves 'stead of fooling them away like that.  And what's
+more--if I was one of them I would see a man in Jericho before I would
+drop my business and come to him for the rubbing of an old tin lamp."
+
+"How you talk, Huck Finn.  Why, you'd HAVE to come when he rubbed it,
+whether you wanted to or not."
+
+"What! and I as high as a tree and as big as a church?  All right, then;
+I WOULD come; but I lay I'd make that man climb the highest tree there
+was in the country."
+
+"Shucks, it ain't no use to talk to you, Huck Finn.  You don't seem to
+know anything, somehow--perfect saphead."
+
+I thought all this over for two or three days, and then I reckoned I
+would see if there was anything in it.  I got an old tin lamp and an iron
+ring, and went out in the woods and rubbed and rubbed till I sweat like
+an Injun, calculating to build a palace and sell it; but it warn't no
+use, none of the genies come.  So then I judged that all that stuff was
+only just one of Tom Sawyer's lies.  I reckoned he believed in the A-rabs
+and the elephants, but as for me I think different.  It had all the marks
+of a Sunday-school.
+
+
+
+
+CHAPTER IV.
+
+WELL, three or four months run along, and it was well into the winter
+now. I had been to school most all the time and could spell and read and
+write just a little, and could say the multiplication table up to six
+times seven is thirty-five, and I don't reckon I could ever get any
+further than that if I was to live forever.  I don't take no stock in
+mathematics, anyway.
+
+At first I hated the school, but by and by I got so I could stand it.
+Whenever I got uncommon tired I played hookey, and the hiding I got next
+day done me good and cheered me up.  So the longer I went to school the
+easier it got to be.  I was getting sort of used to the widow's ways,
+too, and they warn't so raspy on me.  Living in a house and sleeping in a
+bed pulled on me pretty tight mostly, but before the cold weather I used
+to slide out and sleep in the woods sometimes, and so that was a rest to
+me.  I liked the old ways best, but I was getting so I liked the new
+ones, too, a little bit. The widow said I was coming along slow but sure,
+and doing very satisfactory.  She said she warn't ashamed of me.
+
+One morning I happened to turn over the salt-cellar at breakfast.  I
+reached for some of it as quick as I could to throw over my left shoulder
+and keep off the bad luck, but Miss Watson was in ahead of me, and
+crossed me off. She says, "Take your hands away, Huckleberry; what a mess
+you are always making!"  The widow put in a good word for me, but that
+warn't going to keep off the bad luck, I knowed that well enough.  I
+started out, after breakfast, feeling worried and shaky, and wondering
+where it was going to fall on me, and what it was going to be.  There is
+ways to keep off some kinds of bad luck, but this wasn't one of them
+kind; so I never tried to do anything, but just poked along low-spirited
+and on the watch-out.
+
+I went down to the front garden and clumb over the stile where you go
+through the high board fence.  There was an inch of new snow on the
+ground, and I seen somebody's tracks.  They had come up from the quarry
+and stood around the stile a while, and then went on around the garden
+fence.  It was funny they hadn't come in, after standing around so.  I
+couldn't make it out.  It was very curious, somehow.  I was going to
+follow around, but I stooped down to look at the tracks first.  I didn't
+notice anything at first, but next I did.  There was a cross in the left
+boot-heel made with big nails, to keep off the devil.
+
+I was up in a second and shinning down the hill.  I looked over my
+shoulder every now and then, but I didn't see nobody.  I was at Judge
+Thatcher's as quick as I could get there.  He said:
+
+"Why, my boy, you are all out of breath.  Did you come for your
+interest?"
+
+"No, sir," I says; "is there some for me?"
+
+"Oh, yes, a half-yearly is in last night--over a hundred and fifty
+dollars.  Quite a fortune for you.  You had better let me invest it along
+with your six thousand, because if you take it you'll spend it."
+
+"No, sir," I says, "I don't want to spend it.  I don't want it at all
+--nor the six thousand, nuther.  I want you to take it; I want to give it
+to you--the six thousand and all."
+
+He looked surprised.  He couldn't seem to make it out.  He says:
+
+"Why, what can you mean, my boy?"
+
+I says, "Don't you ask me no questions about it, please.  You'll take it
+--won't you?"
+
+He says:
+
+"Well, I'm puzzled.  Is something the matter?"
+
+"Please take it," says I, "and don't ask me nothing--then I won't have to
+tell no lies."
+
+He studied a while, and then he says:
+
+"Oho-o!  I think I see.  You want to SELL all your property to me--not
+give it.  That's the correct idea."
+
+Then he wrote something on a paper and read it over, and says:
+
+"There; you see it says 'for a consideration.'  That means I have bought
+it of you and paid you for it.  Here's a dollar for you.  Now you sign
+it."
+
+So I signed it, and left.
+
+Miss Watson's nigger, Jim, had a hair-ball as big as your fist, which had
+been took out of the fourth stomach of an ox, and he used to do magic
+with it.  He said there was a spirit inside of it, and it knowed
+everything.  So I went to him that night and told him pap was here again,
+for I found his tracks in the snow.  What I wanted to know was, what he
+was going to do, and was he going to stay?  Jim got out his hair-ball and
+said something over it, and then he held it up and dropped it on the
+floor.  It fell pretty solid, and only rolled about an inch.  Jim tried
+it again, and then another time, and it acted just the same.  Jim got
+down on his knees, and put his ear against it and listened.  But it
+warn't no use; he said it wouldn't talk. He said sometimes it wouldn't
+talk without money.  I told him I had an old slick counterfeit quarter
+that warn't no good because the brass showed through the silver a little,
+and it wouldn't pass nohow, even if the brass didn't show, because it was
+so slick it felt greasy, and so that would tell on it every time.  (I
+reckoned I wouldn't say nothing about the dollar I got from the judge.) I
+said it was pretty bad money, but maybe the hair-ball would take it,
+because maybe it wouldn't know the difference.  Jim smelt it and bit it
+and rubbed it, and said he would manage so the hair-ball would think it
+was good.  He said he would split open a raw Irish potato and stick the
+quarter in between and keep it there all night, and next morning you
+couldn't see no brass, and it wouldn't feel greasy no more, and so
+anybody in town would take it in a minute, let alone a hair-ball.  Well,
+I knowed a potato would do that before, but I had forgot it.
+
+Jim put the quarter under the hair-ball, and got down and listened again.
+This time he said the hair-ball was all right.  He said it would tell my
+whole fortune if I wanted it to.  I says, go on.  So the hair-ball talked
+to Jim, and Jim told it to me.  He says:
+
+"Yo' ole father doan' know yit what he's a-gwyne to do.  Sometimes he
+spec he'll go 'way, en den agin he spec he'll stay.  De bes' way is to
+res' easy en let de ole man take his own way.  Dey's two angels hoverin'
+roun' 'bout him.  One uv 'em is white en shiny, en t'other one is black.
+De white one gits him to go right a little while, den de black one sail
+in en bust it all up.  A body can't tell yit which one gwyne to fetch him
+at de las'.  But you is all right.  You gwyne to have considable trouble
+in yo' life, en considable joy.  Sometimes you gwyne to git hurt, en
+sometimes you gwyne to git sick; but every time you's gwyne to git well
+agin.  Dey's two gals flyin' 'bout you in yo' life.  One uv 'em's light
+en t'other one is dark. One is rich en t'other is po'.  You's gwyne to
+marry de po' one fust en de rich one by en by.  You wants to keep 'way
+fum de water as much as you kin, en don't run no resk, 'kase it's down in
+de bills dat you's gwyne to git hung."
+
+When I lit my candle and went up to my room that night there sat pap--his
+own self!
+
+
+
+
+CHAPTER V.
+
+I HAD shut the door to.  Then I turned around and there he was.  I used
+to be scared of him all the time, he tanned me so much.  I reckoned I was
+scared now, too; but in a minute I see I was mistaken--that is, after the
+first jolt, as you may say, when my breath sort of hitched, he being so
+unexpected; but right away after I see I warn't scared of him worth
+bothring about.
+
+He was most fifty, and he looked it.  His hair was long and tangled and
+greasy, and hung down, and you could see his eyes shining through like he
+was behind vines.  It was all black, no gray; so was his long, mixed-up
+whiskers.  There warn't no color in his face, where his face showed; it
+was white; not like another man's white, but a white to make a body sick,
+a white to make a body's flesh crawl--a tree-toad white, a fish-belly
+white.  As for his clothes--just rags, that was all.  He had one ankle
+resting on t'other knee; the boot on that foot was busted, and two of his
+toes stuck through, and he worked them now and then.  His hat was laying
+on the floor--an old black slouch with the top caved in, like a lid.
+
+I stood a-looking at him; he set there a-looking at me, with his chair
+tilted back a little.  I set the candle down.  I noticed the window was
+up; so he had clumb in by the shed.  He kept a-looking me all over.  By
+and by he says:
+
+"Starchy clothes--very.  You think you're a good deal of a big-bug, DON'T
+you?"
+
+"Maybe I am, maybe I ain't," I says.
+
+"Don't you give me none o' your lip," says he.  "You've put on
+considerable many frills since I been away.  I'll take you down a peg
+before I get done with you.  You're educated, too, they say--can read and
+write.  You think you're better'n your father, now, don't you, because he
+can't?  I'LL take it out of you.  Who told you you might meddle with such
+hifalut'n foolishness, hey?--who told you you could?"
+
+"The widow.  She told me."
+
+"The widow, hey?--and who told the widow she could put in her shovel
+about a thing that ain't none of her business?"
+
+"Nobody never told her."
+
+"Well, I'll learn her how to meddle.  And looky here--you drop that
+school, you hear?  I'll learn people to bring up a boy to put on airs
+over his own father and let on to be better'n what HE is.  You lemme
+catch you fooling around that school again, you hear?  Your mother
+couldn't read, and she couldn't write, nuther, before she died.  None of
+the family couldn't before THEY died.  I can't; and here you're
+a-swelling yourself up like this.  I ain't the man to stand it--you hear?
+Say, lemme hear you read."
+
+I took up a book and begun something about General Washington and the
+wars. When I'd read about a half a minute, he fetched the book a whack
+with his hand and knocked it across the house.  He says:
+
+"It's so.  You can do it.  I had my doubts when you told me.  Now looky
+here; you stop that putting on frills.  I won't have it.  I'll lay for
+you, my smarty; and if I catch you about that school I'll tan you good.
+First you know you'll get religion, too.  I never see such a son."
+
+He took up a little blue and yaller picture of some cows and a boy, and
+says:
+
+"What's this?"
+
+"It's something they give me for learning my lessons good."
+
+He tore it up, and says:
+
+"I'll give you something better--I'll give you a cowhide."
+
+He set there a-mumbling and a-growling a minute, and then he says:
+
+"AIN'T you a sweet-scented dandy, though?  A bed; and bedclothes; and a
+look'n'-glass; and a piece of carpet on the floor--and your own father
+got to sleep with the hogs in the tanyard.  I never see such a son.  I
+bet I'll take some o' these frills out o' you before I'm done with you.
+Why, there ain't no end to your airs--they say you're rich.  Hey?--how's
+that?"
+
+"They lie--that's how."
+
+"Looky here--mind how you talk to me; I'm a-standing about all I can
+stand now--so don't gimme no sass.  I've been in town two days, and I
+hain't heard nothing but about you bein' rich.  I heard about it away
+down the river, too.  That's why I come.  You git me that money
+to-morrow--I want it."
+
+"I hain't got no money."
+
+"It's a lie.  Judge Thatcher's got it.  You git it.  I want it."
+
+"I hain't got no money, I tell you.  You ask Judge Thatcher; he'll tell
+you the same."
+
+"All right.  I'll ask him; and I'll make him pungle, too, or I'll know
+the reason why.  Say, how much you got in your pocket?  I want it."
+
+"I hain't got only a dollar, and I want that to--"
+
+"It don't make no difference what you want it for--you just shell it
+out."
+
+He took it and bit it to see if it was good, and then he said he was
+going down town to get some whisky; said he hadn't had a drink all day.
+When he had got out on the shed he put his head in again, and cussed me
+for putting on frills and trying to be better than him; and when I
+reckoned he was gone he come back and put his head in again, and told me
+to mind about that school, because he was going to lay for me and lick me
+if I didn't drop that.
+
+Next day he was drunk, and he went to Judge Thatcher's and bullyragged
+him, and tried to make him give up the money; but he couldn't, and then
+he swore he'd make the law force him.
+
+The judge and the widow went to law to get the court to take me away from
+him and let one of them be my guardian; but it was a new judge that had
+just come, and he didn't know the old man; so he said courts mustn't
+interfere and separate families if they could help it; said he'd druther
+not take a child away from its father.  So Judge Thatcher and the widow
+had to quit on the business.
+
+That pleased the old man till he couldn't rest.  He said he'd cowhide me
+till I was black and blue if I didn't raise some money for him.  I
+borrowed three dollars from Judge Thatcher, and pap took it and got
+drunk, and went a-blowing around and cussing and whooping and carrying
+on; and he kept it up all over town, with a tin pan, till most midnight;
+then they jailed him, and next day they had him before court, and jailed
+him again for a week.  But he said HE was satisfied; said he was boss of
+his son, and he'd make it warm for HIM.
+
+When he got out the new judge said he was a-going to make a man of him.
+So he took him to his own house, and dressed him up clean and nice, and
+had him to breakfast and dinner and supper with the family, and was just
+old pie to him, so to speak.  And after supper he talked to him about
+temperance and such things till the old man cried, and said he'd been a
+fool, and fooled away his life; but now he was a-going to turn over a new
+leaf and be a man nobody wouldn't be ashamed of, and he hoped the judge
+would help him and not look down on him.  The judge said he could hug him
+for them words; so he cried, and his wife she cried again; pap said he'd
+been a man that had always been misunderstood before, and the judge said
+he believed it.  The old man said that what a man wanted that was down
+was sympathy, and the judge said it was so; so they cried again.  And
+when it was bedtime the old man rose up and held out his hand, and says:
+
+"Look at it, gentlemen and ladies all; take a-hold of it; shake it.
+There's a hand that was the hand of a hog; but it ain't so no more; it's
+the hand of a man that's started in on a new life, and'll die before
+he'll go back.  You mark them words--don't forget I said them.  It's a
+clean hand now; shake it--don't be afeard."
+
+So they shook it, one after the other, all around, and cried.  The
+judge's wife she kissed it.  Then the old man he signed a pledge--made
+his mark. The judge said it was the holiest time on record, or something
+like that. Then they tucked the old man into a beautiful room, which was
+the spare room, and in the night some time he got powerful thirsty and
+clumb out on to the porch-roof and slid down a stanchion and traded his
+new coat for a jug of forty-rod, and clumb back again and had a good old
+time; and towards daylight he crawled out again, drunk as a fiddler, and
+rolled off the porch and broke his left arm in two places, and was most
+froze to death when somebody found him after sun-up.  And when they come
+to look at that spare room they had to take soundings before they could
+navigate it.
+
+The judge he felt kind of sore.  He said he reckoned a body could reform
+the old man with a shotgun, maybe, but he didn't know no other way.
+
+
+
+
+CHAPTER VI.
+
+WELL, pretty soon the old man was up and around again, and then he went
+for Judge Thatcher in the courts to make him give up that money, and he
+went for me, too, for not stopping school.  He catched me a couple of
+times and thrashed me, but I went to school just the same, and dodged him
+or outrun him most of the time.  I didn't want to go to school much
+before, but I reckoned I'd go now to spite pap.  That law trial was a
+slow business--appeared like they warn't ever going to get started on
+it; so every now and then I'd borrow two or three dollars off of the
+judge for him, to keep from getting a cowhiding.  Every time he got money
+he got drunk; and every time he got drunk he raised Cain around town; and
+every time he raised Cain he got jailed.  He was just suited--this kind
+of thing was right in his line.
+
+He got to hanging around the widow's too much and so she told him at last
+that if he didn't quit using around there she would make trouble for him.
+Well, WASN'T he mad?  He said he would show who was Huck Finn's boss.  So
+he watched out for me one day in the spring, and catched me, and took me
+up the river about three mile in a skiff, and crossed over to the
+Illinois shore where it was woody and there warn't no houses but an old
+log hut in a place where the timber was so thick you couldn't find it if
+you didn't know where it was.
+
+He kept me with him all the time, and I never got a chance to run off.
+We lived in that old cabin, and he always locked the door and put the key
+under his head nights.  He had a gun which he had stole, I reckon, and we
+fished and hunted, and that was what we lived on.  Every little while he
+locked me in and went down to the store, three miles, to the ferry, and
+traded fish and game for whisky, and fetched it home and got drunk and
+had a good time, and licked me.  The widow she found out where I was by
+and by, and she sent a man over to try to get hold of me; but pap drove
+him off with the gun, and it warn't long after that till I was used to
+being where I was, and liked it--all but the cowhide part.
+
+It was kind of lazy and jolly, laying off comfortable all day, smoking
+and fishing, and no books nor study.  Two months or more run along, and
+my clothes got to be all rags and dirt, and I didn't see how I'd ever got
+to like it so well at the widow's, where you had to wash, and eat on a
+plate, and comb up, and go to bed and get up regular, and be forever
+bothering over a book, and have old Miss Watson pecking at you all the
+time.  I didn't want to go back no more.  I had stopped cussing, because
+the widow didn't like it; but now I took to it again because pap hadn't
+no objections.  It was pretty good times up in the woods there, take it
+all around.
+
+But by and by pap got too handy with his hick'ry, and I couldn't stand
+it. I was all over welts.  He got to going away so much, too, and locking
+me in.  Once he locked me in and was gone three days.  It was dreadful
+lonesome.  I judged he had got drownded, and I wasn't ever going to get
+out any more.  I was scared.  I made up my mind I would fix up some way
+to leave there.  I had tried to get out of that cabin many a time, but I
+couldn't find no way.  There warn't a window to it big enough for a dog
+to get through.  I couldn't get up the chimbly; it was too narrow.  The
+door was thick, solid oak slabs.  Pap was pretty careful not to leave a
+knife or anything in the cabin when he was away; I reckon I had hunted
+the place over as much as a hundred times; well, I was most all the time
+at it, because it was about the only way to put in the time.  But this
+time I found something at last; I found an old rusty wood-saw without any
+handle; it was laid in between a rafter and the clapboards of the roof.
+I greased it up and went to work.  There was an old horse-blanket nailed
+against the logs at the far end of the cabin behind the table, to keep
+the wind from blowing through the chinks and putting the candle out.  I
+got under the table and raised the blanket, and went to work to saw a
+section of the big bottom log out--big enough to let me through.  Well,
+it was a good long job, but I was getting towards the end of it when I
+heard pap's gun in the woods.  I got rid of the signs of my work, and
+dropped the blanket and hid my saw, and pretty soon pap come in.
+
+Pap warn't in a good humor--so he was his natural self.  He said he was
+down town, and everything was going wrong.  His lawyer said he reckoned
+he would win his lawsuit and get the money if they ever got started on
+the trial; but then there was ways to put it off a long time, and Judge
+Thatcher knowed how to do it.  And he said people allowed there'd be
+another trial to get me away from him and give me to the widow for my
+guardian, and they guessed it would win this time.  This shook me up
+considerable, because I didn't want to go back to the widow's any more
+and be so cramped up and sivilized, as they called it.  Then the old man
+got to cussing, and cussed everything and everybody he could think of,
+and then cussed them all over again to make sure he hadn't skipped any,
+and after that he polished off with a kind of a general cuss all round,
+including a considerable parcel of people which he didn't know the names
+of, and so called them what's-his-name when he got to them, and went
+right along with his cussing.
+
+He said he would like to see the widow get me.  He said he would watch
+out, and if they tried to come any such game on him he knowed of a place
+six or seven mile off to stow me in, where they might hunt till they
+dropped and they couldn't find me.  That made me pretty uneasy again, but
+only for a minute; I reckoned I wouldn't stay on hand till he got that
+chance.
+
+The old man made me go to the skiff and fetch the things he had got.
+There was a fifty-pound sack of corn meal, and a side of bacon,
+ammunition, and a four-gallon jug of whisky, and an old book and two
+newspapers for wadding, besides some tow.  I toted up a load, and went
+back and set down on the bow of the skiff to rest.  I thought it all
+over, and I reckoned I would walk off with the gun and some lines, and
+take to the woods when I run away.  I guessed I wouldn't stay in one
+place, but just tramp right across the country, mostly night times, and
+hunt and fish to keep alive, and so get so far away that the old man nor
+the widow couldn't ever find me any more.  I judged I would saw out and
+leave that night if pap got drunk enough, and I reckoned he would.  I got
+so full of it I didn't notice how long I was staying till the old man
+hollered and asked me whether I was asleep or drownded.
+
+I got the things all up to the cabin, and then it was about dark.  While
+I was cooking supper the old man took a swig or two and got sort of
+warmed up, and went to ripping again.  He had been drunk over in town,
+and laid in the gutter all night, and he was a sight to look at.  A body
+would a thought he was Adam--he was just all mud.  Whenever his liquor
+begun to work he most always went for the govment, this time he says:
+
+"Call this a govment! why, just look at it and see what it's like.
+Here's the law a-standing ready to take a man's son away from him--a
+man's own son, which he has had all the trouble and all the anxiety and
+all the expense of raising.  Yes, just as that man has got that son
+raised at last, and ready to go to work and begin to do suthin' for HIM
+and give him a rest, the law up and goes for him.  And they call THAT
+govment!  That ain't all, nuther.  The law backs that old Judge Thatcher
+up and helps him to keep me out o' my property.  Here's what the law
+does:  The law takes a man worth six thousand dollars and up'ards, and
+jams him into an old trap of a cabin like this, and lets him go round in
+clothes that ain't fitten for a hog. They call that govment!  A man can't
+get his rights in a govment like this. Sometimes I've a mighty notion to
+just leave the country for good and all. Yes, and I TOLD 'em so; I told
+old Thatcher so to his face.  Lots of 'em heard me, and can tell what I
+said.  Says I, for two cents I'd leave the blamed country and never come
+a-near it agin.  Them's the very words.  I says look at my hat--if you
+call it a hat--but the lid raises up and the rest of it goes down till
+it's below my chin, and then it ain't rightly a hat at all, but more like
+my head was shoved up through a jint o' stove-pipe.  Look at it, says I
+--such a hat for me to wear--one of the wealthiest men in this town if I
+could git my rights.
+
+"Oh, yes, this is a wonderful govment, wonderful.  Why, looky here.
+There was a free nigger there from Ohio--a mulatter, most as white as a
+white man.  He had the whitest shirt on you ever see, too, and the
+shiniest hat; and there ain't a man in that town that's got as fine
+clothes as what he had; and he had a gold watch and chain, and a
+silver-headed cane--the awfulest old gray-headed nabob in the State.  And
+what do you think?  They said he was a p'fessor in a college, and could
+talk all kinds of languages, and knowed everything.  And that ain't the
+wust. They said he could VOTE when he was at home.  Well, that let me
+out. Thinks I, what is the country a-coming to?  It was 'lection day, and
+I was just about to go and vote myself if I warn't too drunk to get
+there; but when they told me there was a State in this country where
+they'd let that nigger vote, I drawed out.  I says I'll never vote agin.
+Them's the very words I said; they all heard me; and the country may rot
+for all me--I'll never vote agin as long as I live.  And to see the cool
+way of that nigger--why, he wouldn't a give me the road if I hadn't
+shoved him out o' the way.  I says to the people, why ain't this nigger
+put up at auction and sold?--that's what I want to know.  And what do you
+reckon they said? Why, they said he couldn't be sold till he'd been in
+the State six months, and he hadn't been there that long yet.  There,
+now--that's a specimen.  They call that a govment that can't sell a free
+nigger till he's been in the State six months.  Here's a govment that
+calls itself a govment, and lets on to be a govment, and thinks it is a
+govment, and yet's got to set stock-still for six whole months before it
+can take a hold of a prowling, thieving, infernal, white-shirted free
+nigger, and--"
+
+Pap was agoing on so he never noticed where his old limber legs was
+taking him to, so he went head over heels over the tub of salt pork and
+barked both shins, and the rest of his speech was all the hottest kind of
+language--mostly hove at the nigger and the govment, though he give the
+tub some, too, all along, here and there.  He hopped around the cabin
+considerable, first on one leg and then on the other, holding first one
+shin and then the other one, and at last he let out with his left foot
+all of a sudden and fetched the tub a rattling kick.  But it warn't good
+judgment, because that was the boot that had a couple of his toes leaking
+out of the front end of it; so now he raised a howl that fairly made a
+body's hair raise, and down he went in the dirt, and rolled there, and
+held his toes; and the cussing he done then laid over anything he had
+ever done previous.  He said so his own self afterwards.  He had heard
+old Sowberry Hagan in his best days, and he said it laid over him, too;
+but I reckon that was sort of piling it on, maybe.
+
+After supper pap took the jug, and said he had enough whisky there for
+two drunks and one delirium tremens.  That was always his word.  I judged
+he would be blind drunk in about an hour, and then I would steal the key,
+or saw myself out, one or t'other.  He drank and drank, and tumbled down
+on his blankets by and by; but luck didn't run my way.  He didn't go
+sound asleep, but was uneasy.  He groaned and moaned and thrashed around
+this way and that for a long time.  At last I got so sleepy I couldn't
+keep my eyes open all I could do, and so before I knowed what I was about
+I was sound asleep, and the candle burning.
+
+I don't know how long I was asleep, but all of a sudden there was an
+awful scream and I was up.  There was pap looking wild, and skipping
+around every which way and yelling about snakes.  He said they was
+crawling up his legs; and then he would give a jump and scream, and say
+one had bit him on the cheek--but I couldn't see no snakes.  He started
+and run round and round the cabin, hollering "Take him off! take him off!
+he's biting me on the neck!"  I never see a man look so wild in the eyes.
+Pretty soon he was all fagged out, and fell down panting; then he rolled
+over and over wonderful fast, kicking things every which way, and
+striking and grabbing at the air with his hands, and screaming and saying
+there was devils a-hold of him.  He wore out by and by, and laid still a
+while, moaning.  Then he laid stiller, and didn't make a sound.  I could
+hear the owls and the wolves away off in the woods, and it seemed
+terrible still.  He was laying over by the corner. By and by he raised up
+part way and listened, with his head to one side.  He says, very low:
+
+"Tramp--tramp--tramp; that's the dead; tramp--tramp--tramp; they're
+coming after me; but I won't go.  Oh, they're here! don't touch me
+--don't! hands off--they're cold; let go.  Oh, let a poor devil alone!"
+
+Then he went down on all fours and crawled off, begging them to let him
+alone, and he rolled himself up in his blanket and wallowed in under the
+old pine table, still a-begging; and then he went to crying.  I could
+hear him through the blanket.
+
+By and by he rolled out and jumped up on his feet looking wild, and he
+see me and went for me.  He chased me round and round the place with a
+clasp-knife, calling me the Angel of Death, and saying he would kill me,
+and then I couldn't come for him no more.  I begged, and told him I was
+only Huck; but he laughed SUCH a screechy laugh, and roared and cussed,
+and kept on chasing me up.  Once when I turned short and dodged under his
+arm he made a grab and got me by the jacket between my shoulders, and I
+thought I was gone; but I slid out of the jacket quick as lightning, and
+saved myself. Pretty soon he was all tired out, and dropped down with his
+back against the door, and said he would rest a minute and then kill me.
+He put his knife under him, and said he would sleep and get strong, and
+then he would see who was who.
+
+So he dozed off pretty soon.  By and by I got the old split-bottom chair
+and clumb up as easy as I could, not to make any noise, and got down the
+gun.  I slipped the ramrod down it to make sure it was loaded, then I
+laid it across the turnip barrel, pointing towards pap, and set down
+behind it to wait for him to stir.  And how slow and still the time did
+drag along.
+
+
+
+
+CHAPTER VII.
+
+"GIT up!  What you 'bout?"
+
+I opened my eyes and looked around, trying to make out where I was.  It
+was after sun-up, and I had been sound asleep.  Pap was standing over me
+looking sour and sick, too.  He says:
+
+"What you doin' with this gun?"
+
+I judged he didn't know nothing about what he had been doing, so I says:
+
+"Somebody tried to get in, so I was laying for him."
+
+"Why didn't you roust me out?"
+
+"Well, I tried to, but I couldn't; I couldn't budge you."
+
+"Well, all right.  Don't stand there palavering all day, but out with you
+and see if there's a fish on the lines for breakfast.  I'll be along in a
+minute."
+
+He unlocked the door, and I cleared out up the river-bank.  I noticed
+some pieces of limbs and such things floating down, and a sprinkling of
+bark; so I knowed the river had begun to rise.  I reckoned I would have
+great times now if I was over at the town.  The June rise used to be
+always luck for me; because as soon as that rise begins here comes
+cordwood floating down, and pieces of log rafts--sometimes a dozen logs
+together; so all you have to do is to catch them and sell them to the
+wood-yards and the sawmill.
+
+I went along up the bank with one eye out for pap and t'other one out for
+what the rise might fetch along.  Well, all at once here comes a canoe;
+just a beauty, too, about thirteen or fourteen foot long, riding high
+like a duck.  I shot head-first off of the bank like a frog, clothes and
+all on, and struck out for the canoe.  I just expected there'd be
+somebody laying down in it, because people often done that to fool folks,
+and when a chap had pulled a skiff out most to it they'd raise up and
+laugh at him.  But it warn't so this time.  It was a drift-canoe sure
+enough, and I clumb in and paddled her ashore.  Thinks I, the old man
+will be glad when he sees this--she's worth ten dollars.  But when I
+got to shore pap wasn't in sight yet, and as I was running her into a
+little creek like a gully, all hung over with vines and willows, I struck
+another idea:  I judged I'd hide her good, and then, 'stead of taking to
+the woods when I run off, I'd go down the river about fifty mile and camp
+in one place for good, and not have such a rough time tramping on foot.
+
+It was pretty close to the shanty, and I thought I heard the old man
+coming all the time; but I got her hid; and then I out and looked around
+a bunch of willows, and there was the old man down the path a piece just
+drawing a bead on a bird with his gun.  So he hadn't seen anything.
+
+When he got along I was hard at it taking up a "trot" line.  He abused me
+a little for being so slow; but I told him I fell in the river, and that
+was what made me so long.  I knowed he would see I was wet, and then he
+would be asking questions.  We got five catfish off the lines and went
+home.
+
+While we laid off after breakfast to sleep up, both of us being about
+wore out, I got to thinking that if I could fix up some way to keep pap
+and the widow from trying to follow me, it would be a certainer thing
+than trusting to luck to get far enough off before they missed me; you
+see, all kinds of things might happen.  Well, I didn't see no way for a
+while, but by and by pap raised up a minute to drink another barrel of
+water, and he says:
+
+"Another time a man comes a-prowling round here you roust me out, you
+hear? That man warn't here for no good.  I'd a shot him.  Next time you
+roust me out, you hear?"
+
+Then he dropped down and went to sleep again; but what he had been saying
+give me the very idea I wanted.  I says to myself, I can fix it now so
+nobody won't think of following me.
+
+About twelve o'clock we turned out and went along up the bank.  The river
+was coming up pretty fast, and lots of driftwood going by on the rise.
+By and by along comes part of a log raft--nine logs fast together.  We
+went out with the skiff and towed it ashore.  Then we had dinner.
+Anybody but pap would a waited and seen the day through, so as to catch
+more stuff; but that warn't pap's style.  Nine logs was enough for one
+time; he must shove right over to town and sell.  So he locked me in and
+took the skiff, and started off towing the raft about half-past three.  I
+judged he wouldn't come back that night.  I waited till I reckoned he had
+got a good start; then I out with my saw, and went to work on that log
+again.  Before he was t'other side of the river I was out of the hole;
+him and his raft was just a speck on the water away off yonder.
+
+I took the sack of corn meal and took it to where the canoe was hid, and
+shoved the vines and branches apart and put it in; then I done the same
+with the side of bacon; then the whisky-jug.  I took all the coffee and
+sugar there was, and all the ammunition; I took the wadding; I took the
+bucket and gourd; I took a dipper and a tin cup, and my old saw and two
+blankets, and the skillet and the coffee-pot.  I took fish-lines and
+matches and other things--everything that was worth a cent.  I cleaned
+out the place.  I wanted an axe, but there wasn't any, only the one out
+at the woodpile, and I knowed why I was going to leave that.  I fetched
+out the gun, and now I was done.
+
+I had wore the ground a good deal crawling out of the hole and dragging
+out so many things.  So I fixed that as good as I could from the outside
+by scattering dust on the place, which covered up the smoothness and the
+sawdust.  Then I fixed the piece of log back into its place, and put two
+rocks under it and one against it to hold it there, for it was bent up at
+that place and didn't quite touch ground.  If you stood four or five foot
+away and didn't know it was sawed, you wouldn't never notice it; and
+besides, this was the back of the cabin, and it warn't likely anybody
+would go fooling around there.
+
+It was all grass clear to the canoe, so I hadn't left a track.  I
+followed around to see.  I stood on the bank and looked out over the
+river.  All safe.  So I took the gun and went up a piece into the woods,
+and was hunting around for some birds when I see a wild pig; hogs soon
+went wild in them bottoms after they had got away from the prairie farms.
+I shot this fellow and took him into camp.
+
+I took the axe and smashed in the door.  I beat it and hacked it
+considerable a-doing it.  I fetched the pig in, and took him back nearly
+to the table and hacked into his throat with the axe, and laid him down
+on the ground to bleed; I say ground because it was ground--hard packed,
+and no boards.  Well, next I took an old sack and put a lot of big rocks
+in it--all I could drag--and I started it from the pig, and dragged it
+to the door and through the woods down to the river and dumped it in, and
+down it sunk, out of sight.  You could easy see that something had been
+dragged over the ground.  I did wish Tom Sawyer was there; I knowed he
+would take an interest in this kind of business, and throw in the fancy
+touches.  Nobody could spread himself like Tom Sawyer in such a thing as
+that.
+
+Well, last I pulled out some of my hair, and blooded the axe good, and
+stuck it on the back side, and slung the axe in the corner.  Then I took
+up the pig and held him to my breast with my jacket (so he couldn't drip)
+till I got a good piece below the house and then dumped him into the
+river.  Now I thought of something else.  So I went and got the bag of
+meal and my old saw out of the canoe, and fetched them to the house.  I
+took the bag to where it used to stand, and ripped a hole in the bottom
+of it with the saw, for there warn't no knives and forks on the place
+--pap done everything with his clasp-knife about the cooking.  Then I
+carried the sack about a hundred yards across the grass and through the
+willows east of the house, to a shallow lake that was five mile wide and
+full of rushes--and ducks too, you might say, in the season.  There was a
+slough or a creek leading out of it on the other side that went miles
+away, I don't know where, but it didn't go to the river.  The meal sifted
+out and made a little track all the way to the lake.  I dropped pap's
+whetstone there too, so as to look like it had been done by accident.
+Then I tied up the rip in the meal sack with a string, so it wouldn't
+leak no more, and took it and my saw to the canoe again.
+
+It was about dark now; so I dropped the canoe down the river under some
+willows that hung over the bank, and waited for the moon to rise.  I made
+fast to a willow; then I took a bite to eat, and by and by laid down in
+the canoe to smoke a pipe and lay out a plan.  I says to myself, they'll
+follow the track of that sackful of rocks to the shore and then drag the
+river for me.  And they'll follow that meal track to the lake and go
+browsing down the creek that leads out of it to find the robbers that
+killed me and took the things.  They won't ever hunt the river for
+anything but my dead carcass. They'll soon get tired of that, and won't
+bother no more about me.  All right; I can stop anywhere I want to.
+Jackson's Island is good enough for me; I know that island pretty well,
+and nobody ever comes there.  And then I can paddle over to town nights,
+and slink around and pick up things I want. Jackson's Island's the place.
+
+I was pretty tired, and the first thing I knowed I was asleep.  When I
+woke up I didn't know where I was for a minute.  I set up and looked
+around, a little scared.  Then I remembered.  The river looked miles and
+miles across.  The moon was so bright I could a counted the drift logs
+that went a-slipping along, black and still, hundreds of yards out from
+shore. Everything was dead quiet, and it looked late, and SMELT late.
+You know what I mean--I don't know the words to put it in.
+
+I took a good gap and a stretch, and was just going to unhitch and start
+when I heard a sound away over the water.  I listened.  Pretty soon I
+made it out.  It was that dull kind of a regular sound that comes from
+oars working in rowlocks when it's a still night.  I peeped out through
+the willow branches, and there it was--a skiff, away across the water.  I
+couldn't tell how many was in it.  It kept a-coming, and when it was
+abreast of me I see there warn't but one man in it.  Think's I, maybe
+it's pap, though I warn't expecting him.  He dropped below me with the
+current, and by and by he came a-swinging up shore in the easy water, and
+he went by so close I could a reached out the gun and touched him.  Well,
+it WAS pap, sure enough--and sober, too, by the way he laid his oars.
+
+I didn't lose no time.  The next minute I was a-spinning down stream soft
+but quick in the shade of the bank.  I made two mile and a half, and then
+struck out a quarter of a mile or more towards the middle of the river,
+because pretty soon I would be passing the ferry landing, and people
+might see me and hail me.  I got out amongst the driftwood, and then laid
+down in the bottom of the canoe and let her float.  I laid there, and had
+a good rest and a smoke out of my pipe, looking away into the sky; not a
+cloud in it.  The sky looks ever so deep when you lay down on your back
+in the moonshine; I never knowed it before.  And how far a body can hear
+on the water such nights!  I heard people talking at the ferry landing.
+I heard what they said, too--every word of it.  One man said it was
+getting towards the long days and the short nights now.  T'other one said
+THIS warn't one of the short ones, he reckoned--and then they laughed,
+and he said it over again, and they laughed again; then they waked up
+another fellow and told him, and laughed, but he didn't laugh; he ripped
+out something brisk, and said let him alone.  The first fellow said he
+'lowed to tell it to his old woman--she would think it was pretty good;
+but he said that warn't nothing to some things he had said in his time.
+I heard one man say it was nearly three o'clock, and he hoped daylight
+wouldn't wait more than about a week longer.  After that the talk got
+further and further away, and I couldn't make out the words any more; but
+I could hear the mumble, and now and then a laugh, too, but it seemed a
+long ways off.
+
+I was away below the ferry now.  I rose up, and there was Jackson's
+Island, about two mile and a half down stream, heavy timbered and
+standing up out of the middle of the river, big and dark and solid, like
+a steamboat without any lights.  There warn't any signs of the bar at the
+head--it was all under water now.
+
+It didn't take me long to get there.  I shot past the head at a ripping
+rate, the current was so swift, and then I got into the dead water and
+landed on the side towards the Illinois shore.  I run the canoe into a
+deep dent in the bank that I knowed about; I had to part the willow
+branches to get in; and when I made fast nobody could a seen the canoe
+from the outside.
+
+I went up and set down on a log at the head of the island, and looked out
+on the big river and the black driftwood and away over to the town, three
+mile away, where there was three or four lights twinkling.  A monstrous
+big lumber-raft was about a mile up stream, coming along down, with a
+lantern in the middle of it.  I watched it come creeping down, and when
+it was most abreast of where I stood I heard a man say, "Stern oars,
+there! heave her head to stabboard!"  I heard that just as plain as if
+the man was by my side.
+
+There was a little gray in the sky now; so I stepped into the woods, and
+laid down for a nap before breakfast.
+
+
+
+
+CHAPTER VIII.
+
+THE sun was up so high when I waked that I judged it was after eight
+o'clock.  I laid there in the grass and the cool shade thinking about
+things, and feeling rested and ruther comfortable and satisfied.  I could
+see the sun out at one or two holes, but mostly it was big trees all
+about, and gloomy in there amongst them.  There was freckled places on
+the ground where the light sifted down through the leaves, and the
+freckled places swapped about a little, showing there was a little breeze
+up there.  A couple of squirrels set on a limb and jabbered at me very
+friendly.
+
+I was powerful lazy and comfortable--didn't want to get up and cook
+breakfast.  Well, I was dozing off again when I thinks I hears a deep
+sound of "boom!" away up the river.  I rouses up, and rests on my elbow
+and listens; pretty soon I hears it again.  I hopped up, and went and
+looked out at a hole in the leaves, and I see a bunch of smoke laying on
+the water a long ways up--about abreast the ferry.  And there was the
+ferryboat full of people floating along down.  I knowed what was the
+matter now.  "Boom!" I see the white smoke squirt out of the ferryboat's
+side.  You see, they was firing cannon over the water, trying to make my
+carcass come to the top.
+
+I was pretty hungry, but it warn't going to do for me to start a fire,
+because they might see the smoke.  So I set there and watched the
+cannon-smoke and listened to the boom.  The river was a mile wide there,
+and it always looks pretty on a summer morning--so I was having a good
+enough time seeing them hunt for my remainders if I only had a bite to
+eat. Well, then I happened to think how they always put quicksilver in
+loaves of bread and float them off, because they always go right to the
+drownded carcass and stop there.  So, says I, I'll keep a lookout, and if
+any of them's floating around after me I'll give them a show.  I changed
+to the Illinois edge of the island to see what luck I could have, and I
+warn't disappointed.  A big double loaf come along, and I most got it
+with a long stick, but my foot slipped and she floated out further.  Of
+course I was where the current set in the closest to the shore--I knowed
+enough for that.  But by and by along comes another one, and this time I
+won.  I took out the plug and shook out the little dab of quicksilver,
+and set my teeth in.  It was "baker's bread"--what the quality eat; none
+of your low-down corn-pone.
+
+I got a good place amongst the leaves, and set there on a log, munching
+the bread and watching the ferry-boat, and very well satisfied.  And then
+something struck me.  I says, now I reckon the widow or the parson or
+somebody prayed that this bread would find me, and here it has gone and
+done it.  So there ain't no doubt but there is something in that thing
+--that is, there's something in it when a body like the widow or the parson
+prays, but it don't work for me, and I reckon it don't work for only just
+the right kind.
+
+I lit a pipe and had a good long smoke, and went on watching.  The
+ferryboat was floating with the current, and I allowed I'd have a chance
+to see who was aboard when she come along, because she would come in
+close, where the bread did.  When she'd got pretty well along down
+towards me, I put out my pipe and went to where I fished out the bread,
+and laid down behind a log on the bank in a little open place.  Where the
+log forked I could peep through.
+
+By and by she come along, and she drifted in so close that they could a
+run out a plank and walked ashore.  Most everybody was on the boat.  Pap,
+and Judge Thatcher, and Bessie Thatcher, and Jo Harper, and Tom Sawyer,
+and his old Aunt Polly, and Sid and Mary, and plenty more.  Everybody was
+talking about the murder, but the captain broke in and says:
+
+"Look sharp, now; the current sets in the closest here, and maybe he's
+washed ashore and got tangled amongst the brush at the water's edge.  I
+hope so, anyway."
+
+I didn't hope so.  They all crowded up and leaned over the rails, nearly
+in my face, and kept still, watching with all their might.  I could see
+them first-rate, but they couldn't see me.  Then the captain sung out:
+
+"Stand away!" and the cannon let off such a blast right before me that it
+made me deef with the noise and pretty near blind with the smoke, and I
+judged I was gone.  If they'd a had some bullets in, I reckon they'd a
+got the corpse they was after.  Well, I see I warn't hurt, thanks to
+goodness. The boat floated on and went out of sight around the shoulder
+of the island.  I could hear the booming now and then, further and
+further off, and by and by, after an hour, I didn't hear it no more.  The
+island was three mile long.  I judged they had got to the foot, and was
+giving it up.  But they didn't yet a while.  They turned around the foot
+of the island and started up the channel on the Missouri side, under
+steam, and booming once in a while as they went.  I crossed over to that
+side and watched them. When they got abreast the head of the island they
+quit shooting and dropped over to the Missouri shore and went home to the
+town.
+
+I knowed I was all right now.  Nobody else would come a-hunting after me.
+I got my traps out of the canoe and made me a nice camp in the thick
+woods.  I made a kind of a tent out of my blankets to put my things under
+so the rain couldn't get at them.  I catched a catfish and haggled him
+open with my saw, and towards sundown I started my camp fire and had
+supper.  Then I set out a line to catch some fish for breakfast.
+
+When it was dark I set by my camp fire smoking, and feeling pretty well
+satisfied; but by and by it got sort of lonesome, and so I went and set
+on the bank and listened to the current swashing along, and counted the
+stars and drift logs and rafts that come down, and then went to bed;
+there ain't no better way to put in time when you are lonesome; you can't
+stay so, you soon get over it.
+
+And so for three days and nights.  No difference--just the same thing.
+But the next day I went exploring around down through the island.  I was
+boss of it; it all belonged to me, so to say, and I wanted to know all
+about it; but mainly I wanted to put in the time.  I found plenty
+strawberries, ripe and prime; and green summer grapes, and green
+razberries; and the green blackberries was just beginning to show.  They
+would all come handy by and by, I judged.
+
+Well, I went fooling along in the deep woods till I judged I warn't far
+from the foot of the island.  I had my gun along, but I hadn't shot
+nothing; it was for protection; thought I would kill some game nigh home.
+About this time I mighty near stepped on a good-sized snake, and it went
+sliding off through the grass and flowers, and I after it, trying to get
+a shot at it. I clipped along, and all of a sudden I bounded right on to
+the ashes of a camp fire that was still smoking.
+
+My heart jumped up amongst my lungs.  I never waited for to look further,
+but uncocked my gun and went sneaking back on my tiptoes as fast as ever
+I could.  Every now and then I stopped a second amongst the thick leaves
+and listened, but my breath come so hard I couldn't hear nothing else.  I
+slunk along another piece further, then listened again; and so on, and so
+on.  If I see a stump, I took it for a man; if I trod on a stick and
+broke it, it made me feel like a person had cut one of my breaths in two
+and I only got half, and the short half, too.
+
+When I got to camp I warn't feeling very brash, there warn't much sand in
+my craw; but I says, this ain't no time to be fooling around.  So I got
+all my traps into my canoe again so as to have them out of sight, and I
+put out the fire and scattered the ashes around to look like an old last
+year's camp, and then clumb a tree.
+
+I reckon I was up in the tree two hours; but I didn't see nothing, I
+didn't hear nothing--I only THOUGHT I heard and seen as much as a
+thousand things.  Well, I couldn't stay up there forever; so at last I
+got down, but I kept in the thick woods and on the lookout all the time.
+All I could get to eat was berries and what was left over from breakfast.
+
+By the time it was night I was pretty hungry.  So when it was good and
+dark I slid out from shore before moonrise and paddled over to the
+Illinois bank--about a quarter of a mile.  I went out in the woods and
+cooked a supper, and I had about made up my mind I would stay there all
+night when I hear a PLUNKETY-PLUNK, PLUNKETY-PLUNK, and says to myself,
+horses coming; and next I hear people's voices.  I got everything into
+the canoe as quick as I could, and then went creeping through the woods
+to see what I could find out.  I hadn't got far when I hear a man say:
+
+"We better camp here if we can find a good place; the horses is about
+beat out.  Let's look around."
+
+I didn't wait, but shoved out and paddled away easy.  I tied up in the
+old place, and reckoned I would sleep in the canoe.
+
+I didn't sleep much.  I couldn't, somehow, for thinking.  And every time
+I waked up I thought somebody had me by the neck.  So the sleep didn't do
+me no good.  By and by I says to myself, I can't live this way; I'm
+a-going to find out who it is that's here on the island with me; I'll
+find it out or bust.  Well, I felt better right off.
+
+So I took my paddle and slid out from shore just a step or two, and then
+let the canoe drop along down amongst the shadows.  The moon was shining,
+and outside of the shadows it made it most as light as day.  I poked
+along well on to an hour, everything still as rocks and sound asleep.
+Well, by this time I was most down to the foot of the island.  A little
+ripply, cool breeze begun to blow, and that was as good as saying the
+night was about done.  I give her a turn with the paddle and brung her
+nose to shore; then I got my gun and slipped out and into the edge of the
+woods.  I sat down there on a log, and looked out through the leaves.  I
+see the moon go off watch, and the darkness begin to blanket the river.
+But in a little while I see a pale streak over the treetops, and knowed
+the day was coming.  So I took my gun and slipped off towards where I had
+run across that camp fire, stopping every minute or two to listen.  But I
+hadn't no luck somehow; I couldn't seem to find the place.  But by and
+by, sure enough, I catched a glimpse of fire away through the trees.  I
+went for it, cautious and slow.  By and by I was close enough to have a
+look, and there laid a man on the ground.  It most give me the fan-tods.
+He had a blanket around his head, and his head was nearly in the fire.  I
+set there behind a clump of bushes, in about six foot of him, and kept my
+eyes on him steady.  It was getting gray daylight now.  Pretty soon he
+gapped and stretched himself and hove off the blanket, and it was Miss
+Watson's Jim!  I bet I was glad to see him.  I says:
+
+"Hello, Jim!" and skipped out.
+
+He bounced up and stared at me wild.  Then he drops down on his knees,
+and puts his hands together and says:
+
+"Doan' hurt me--don't!  I hain't ever done no harm to a ghos'.  I alwuz
+liked dead people, en done all I could for 'em.  You go en git in de
+river agin, whah you b'longs, en doan' do nuffn to Ole Jim, 'at 'uz awluz
+yo' fren'."
+
+Well, I warn't long making him understand I warn't dead.  I was ever so
+glad to see Jim.  I warn't lonesome now.  I told him I warn't afraid of
+HIM telling the people where I was.  I talked along, but he only set
+there and looked at me; never said nothing.  Then I says:
+
+"It's good daylight.  Le's get breakfast.  Make up your camp fire good."
+
+"What's de use er makin' up de camp fire to cook strawbries en sich
+truck? But you got a gun, hain't you?  Den we kin git sumfn better den
+strawbries."
+
+"Strawberries and such truck," I says.  "Is that what you live on?"
+
+"I couldn' git nuffn else," he says.
+
+"Why, how long you been on the island, Jim?"
+
+"I come heah de night arter you's killed."
+
+"What, all that time?"
+
+"Yes--indeedy."
+
+"And ain't you had nothing but that kind of rubbage to eat?"
+
+"No, sah--nuffn else."
+
+"Well, you must be most starved, ain't you?"
+
+"I reck'n I could eat a hoss.  I think I could. How long you ben on de
+islan'?"
+
+"Since the night I got killed."
+
+"No!  W'y, what has you lived on?  But you got a gun.  Oh, yes, you got a
+gun.  Dat's good.  Now you kill sumfn en I'll make up de fire."
+
+So we went over to where the canoe was, and while he built a fire in a
+grassy open place amongst the trees, I fetched meal and bacon and coffee,
+and coffee-pot and frying-pan, and sugar and tin cups, and the nigger was
+set back considerable, because he reckoned it was all done with
+witchcraft. I catched a good big catfish, too, and Jim cleaned him with
+his knife, and fried him.
+
+When breakfast was ready we lolled on the grass and eat it smoking hot.
+Jim laid it in with all his might, for he was most about starved.  Then
+when we had got pretty well stuffed, we laid off and lazied.  By and by
+Jim says:
+
+"But looky here, Huck, who wuz it dat 'uz killed in dat shanty ef it
+warn't you?"
+
+Then I told him the whole thing, and he said it was smart.  He said Tom
+Sawyer couldn't get up no better plan than what I had.  Then I says:
+
+"How do you come to be here, Jim, and how'd you get here?"
+
+He looked pretty uneasy, and didn't say nothing for a minute.  Then he
+says:
+
+"Maybe I better not tell."
+
+"Why, Jim?"
+
+"Well, dey's reasons.  But you wouldn' tell on me ef I uz to tell you,
+would you, Huck?"
+
+"Blamed if I would, Jim."
+
+"Well, I b'lieve you, Huck.  I--I RUN OFF."
+
+"Jim!"
+
+"But mind, you said you wouldn' tell--you know you said you wouldn' tell,
+Huck."
+
+"Well, I did.  I said I wouldn't, and I'll stick to it.  Honest INJUN, I
+will.  People would call me a low-down Abolitionist and despise me for
+keeping mum--but that don't make no difference.  I ain't a-going to tell,
+and I ain't a-going back there, anyways.  So, now, le's know all about
+it."
+
+"Well, you see, it 'uz dis way.  Ole missus--dat's Miss Watson--she pecks
+on me all de time, en treats me pooty rough, but she awluz said she
+wouldn' sell me down to Orleans.  But I noticed dey wuz a nigger trader
+roun' de place considable lately, en I begin to git oneasy.  Well, one
+night I creeps to de do' pooty late, en de do' warn't quite shet, en I
+hear old missus tell de widder she gwyne to sell me down to Orleans, but
+she didn' want to, but she could git eight hund'd dollars for me, en it
+'uz sich a big stack o' money she couldn' resis'.  De widder she try to
+git her to say she wouldn' do it, but I never waited to hear de res'.  I
+lit out 

<TRUNCATED>

[18/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemsUtils.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemsUtils.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemsUtils.java
deleted file mode 100644
index 68c0dc4..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemsUtils.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.fs;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FsConstants;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Utilities for configuring file systems to support a separate working directory per thread.
- */
-public class HadoopFileSystemsUtils {
-    /** Name of the property that sets the working directory when a new local FS instance is created. */
-    public static final String LOC_FS_WORK_DIR_PROP = "fs." + FsConstants.LOCAL_FS_URI.getScheme() + ".workDir";
-
-    /**
-     * Sets up file system wrappers to support a separate working directory per thread.
-     *
-     * @param cfg Configuration to set up.
-     */
-    public static void setupFileSystems(Configuration cfg) {
-        cfg.set("fs." + FsConstants.LOCAL_FS_URI.getScheme() + ".impl", HadoopLocalFileSystemV1.class.getName());
-        cfg.set("fs.AbstractFileSystem." + FsConstants.LOCAL_FS_URI.getScheme() + ".impl",
-                HadoopLocalFileSystemV2.class.getName());
-    }
-
-    /**
-     * Gets the property name to disable file system cache.
-     * @param scheme The file system URI scheme.
-     * @return The property name. If scheme is null,
-     * returns "fs.null.impl.disable.cache".
-     */
-    public static String disableFsCachePropertyName(@Nullable String scheme) {
-        return String.format("fs.%s.impl.disable.cache", scheme);
-    }
-}
\ No newline at end of file
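
For context, a minimal sketch of wiring these utilities into a Hadoop Configuration (the setup class below is illustrative, not part of this commit; it assumes only the constants and methods shown above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemsUtils;

    public class LocalFsSetupSketch {
        public static void main(String[] args) {
            Configuration cfg = new Configuration();

            // Replace the default local FS with the per-thread working directory wrappers.
            HadoopFileSystemsUtils.setupFileSystems(cfg);

            // Seed the initial working directory picked up in HadoopRawLocalFileSystem.initialize().
            cfg.set(HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP, "/tmp/job-work-dir");

            // Turn off FS caching for the "file" scheme so each job gets a fresh instance.
            cfg.setBoolean(HadoopFileSystemsUtils.disableFsCachePropertyName("file"), true);
        }
    }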

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLazyConcurrentMap.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLazyConcurrentMap.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLazyConcurrentMap.java
deleted file mode 100644
index 681cddb..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLazyConcurrentMap.java
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.fs;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.Set;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.internal.util.future.GridFutureAdapter;
-import org.jsr166.ConcurrentHashMap8;
-
-/**
- * Maps values by keys.
- * Values are created lazily using {@link ValueFactory}.
- *
- * Despite the name, it does not depend on any Hadoop classes.
- */
-public class HadoopLazyConcurrentMap<K, V extends Closeable> {
-    /** The map storing the actual values. */
-    private final ConcurrentMap<K, ValueWrapper> map = new ConcurrentHashMap8<>();
-
-    /** The factory passed in by the client. Will be used for lazy value creation. */
-    private final ValueFactory<K, V> factory;
-
-    /** Lock used to close the objects. */
-    private final ReadWriteLock closeLock = new ReentrantReadWriteLock();
-
-    /** Flag indicating that this map is closed and cleared. */
-    private boolean closed;
-
-    /**
-     * Constructor.
-     * @param factory the factory to create new values lazily.
-     */
-    public HadoopLazyConcurrentMap(ValueFactory<K, V> factory) {
-        this.factory = factory;
-
-        assert getClass().getClassLoader() == Ignite.class.getClassLoader();
-    }
-
-    /**
-     * Gets the cached value or creates a new value of V.
-     * Never returns null.
-     * @param k the key to associate the value with.
-     * @return the cached or newly created value, never null.
-     * @throws IgniteException on error
-     */
-    public V getOrCreate(K k) {
-        ValueWrapper w = map.get(k);
-
-        if (w == null) {
-            closeLock.readLock().lock();
-
-            try {
-                if (closed)
-                    throw new IllegalStateException("Failed to create value for key [" + k
-                        + "]: the map is already closed.");
-
-                final ValueWrapper wNew = new ValueWrapper(k);
-
-                w = map.putIfAbsent(k, wNew);
-
-                if (w == null) {
-                    wNew.init();
-
-                    w = wNew;
-                }
-            }
-            finally {
-                closeLock.readLock().unlock();
-            }
-        }
-
-        try {
-            V v = w.getValue();
-
-            assert v != null;
-
-            return v;
-        }
-        catch (IgniteCheckedException ie) {
-            throw new IgniteException(ie);
-        }
-    }
-
-    /**
-     * Clears the map and closes all the values.
-     */
-    public void close() throws IgniteCheckedException {
-        closeLock.writeLock().lock();
-
-        try {
-            if (closed)
-                return;
-
-            closed = true;
-
-            Exception err = null;
-
-            Set<K> keySet = map.keySet();
-
-            for (K key : keySet) {
-                V v = null;
-
-                try {
-                    v = map.get(key).getValue();
-                }
-                catch (IgniteCheckedException ignore) {
-                    // No-op.
-                }
-
-                if (v != null) {
-                    try {
-                        v.close();
-                    }
-                    catch (Exception err0) {
-                        if (err == null)
-                            err = err0;
-                    }
-                }
-            }
-
-            map.clear();
-
-            if (err != null)
-                throw new IgniteCheckedException(err);
-        }
-        finally {
-            closeLock.writeLock().unlock();
-        }
-    }
-
-    /**
-     * Helper class that drives the lazy value creation.
-     */
-    private class ValueWrapper {
-        /** Future. */
-        private final GridFutureAdapter<V> fut = new GridFutureAdapter<>();
-
-        /** The key. */
-        private final K key;
-
-        /**
-         * Creates new wrapper.
-         */
-        private ValueWrapper(K key) {
-            this.key = key;
-        }
-
-        /**
-         * Initializes the value using the factory.
-         */
-        private void init() {
-            try {
-                final V v0 = factory.createValue(key);
-
-                if (v0 == null)
-                    throw new IgniteException("Failed to create non-null value. [key=" + key + ']');
-
-                fut.onDone(v0);
-            }
-            catch (Throwable e) {
-                fut.onDone(e);
-            }
-        }
-
-        /**
-         * Gets the available value or blocks until the value is initialized.
-         * @return the value, never null.
-         * @throws IgniteCheckedException on error.
-         */
-        V getValue() throws IgniteCheckedException {
-            return fut.get();
-        }
-    }
-
-    /**
-     * Interface representing the factory that creates map values.
-     * @param <K> the type of the key.
-     * @param <V> the type of the value.
-     */
-    public interface ValueFactory <K, V> {
-        /**
-         * Creates a new value. Must never return null.
-         *
-         * @param key The key to create the value for.
-         * @return The value, never null.
-         * @throws IOException On failure.
-         */
-        public V createValue(K key) throws IOException;
-    }
-}
\ No newline at end of file
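
For context, a small sketch of the getOrCreate/close contract above (the Session value type is made up for illustration; note the constructor's classloader assertion, which holds when the map lives in Ignite's own classloader):

    import java.io.Closeable;
    import java.io.IOException;
    import org.apache.ignite.internal.processors.hadoop.fs.HadoopLazyConcurrentMap;

    public class LazyMapSketch {
        /** Illustrative closeable value. */
        static class Session implements Closeable {
            private final String user;

            Session(String user) { this.user = user; }

            @Override public void close() throws IOException {
                System.out.println("Closed session of " + user);
            }
        }

        public static void main(String[] args) throws Exception {
            HadoopLazyConcurrentMap<String, Session> map = new HadoopLazyConcurrentMap<>(
                new HadoopLazyConcurrentMap.ValueFactory<String, Session>() {
                    @Override public Session createValue(String user) throws IOException {
                        return new Session(user); // Invoked lazily, at most once per key.
                    }
                });

            Session s1 = map.getOrCreate("alice");
            Session s2 = map.getOrCreate("alice");

            assert s1 == s2; // Concurrent callers block on the same future and get one instance.

            map.close(); // Closes all values; subsequent getOrCreate() throws IllegalStateException.
        }
    }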

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV1.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV1.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV1.java
deleted file mode 100644
index cbb007f..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV1.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.fs;
-
-import java.io.File;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.hadoop.fs.Path;
-
-/**
- * Local file system replacement for Hadoop jobs.
- */
-public class HadoopLocalFileSystemV1 extends LocalFileSystem {
-    /**
-     * Creates new local file system.
-     */
-    public HadoopLocalFileSystemV1() {
-        super(new HadoopRawLocalFileSystem());
-    }
-
-    /** {@inheritDoc} */
-    @Override public File pathToFile(Path path) {
-        return ((HadoopRawLocalFileSystem)getRaw()).convert(path);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV2.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV2.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV2.java
deleted file mode 100644
index 2484492..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV2.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.fs;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.ChecksumFs;
-import org.apache.hadoop.fs.DelegateToFileSystem;
-import org.apache.hadoop.fs.FsServerDefaults;
-import org.apache.hadoop.fs.local.LocalConfigKeys;
-
-import static org.apache.hadoop.fs.FsConstants.LOCAL_FS_URI;
-
-/**
- * Local file system replacement for Hadoop jobs.
- */
-public class HadoopLocalFileSystemV2 extends ChecksumFs {
-    /**
-     * Creates new local file system.
-     *
-     * @param cfg Configuration.
-     * @throws IOException If failed.
-     * @throws URISyntaxException If failed.
-     */
-    public HadoopLocalFileSystemV2(Configuration cfg) throws IOException, URISyntaxException {
-        super(new DelegateFS(cfg));
-    }
-
-    /**
-     * Creates new local file system.
-     *
-     * @param uri URI.
-     * @param cfg Configuration.
-     * @throws IOException If failed.
-     * @throws URISyntaxException If failed.
-     */
-    public HadoopLocalFileSystemV2(URI uri, Configuration cfg) throws IOException, URISyntaxException {
-        this(cfg);
-    }
-
-    /**
-     * Delegate file system.
-     */
-    private static class DelegateFS extends DelegateToFileSystem {
-        /**
-         * Creates new local file system.
-         *
-         * @param cfg Configuration.
-         * @throws IOException If failed.
-         * @throws URISyntaxException If failed.
-         */
-        public DelegateFS(Configuration cfg) throws IOException, URISyntaxException {
-            super(LOCAL_FS_URI, new HadoopRawLocalFileSystem(), cfg, LOCAL_FS_URI.getScheme(), false);
-        }
-
-        /** {@inheritDoc} */
-        @Override public int getUriDefaultPort() {
-            return -1;
-        }
-
-        /** {@inheritDoc} */
-        @Override public FsServerDefaults getServerDefaults() throws IOException {
-            return LocalConfigKeys.getServerDefaults();
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean isValidName(String src) {
-            return true;
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopParameters.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopParameters.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopParameters.java
deleted file mode 100644
index 0aac4a3..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopParameters.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.fs;
-
-/**
- * This class lists parameters that can be specified in Hadoop configuration.
- * Hadoop configuration can be specified in the {@code core-site.xml} file
- * or passed to a map-reduce task directly when using the Hadoop driver for the IGFS file system:
- * <ul>
- *     <li>
- *         {@code fs.igfs.[name].open.sequential_reads_before_prefetch} - overrides the
- *         {@link org.apache.ignite.configuration.FileSystemConfiguration#getSequentialReadsBeforePrefetch()}
- *         IGFS data node configuration property.
- *     </li>
- *     <li>
- *         {@code fs.igfs.[name].log.enabled} - specifies whether IGFS sampling logger is enabled. If
- *         {@code true}, then all file system operations will be logged to a file.
- *     </li>
- *     <li>{@code fs.igfs.[name].log.dir} - specifies log directory where sampling log files should be placed.</li>
- *     <li>
- *         {@code fs.igfs.[name].log.batch_size} - specifies how many log entries are accumulated in a batch before
- *         it is flushed to the log file. Higher values improve performance, but increase the delay before a
- *         record appears in the log file.
- *     </li>
- *     <li>
- *         {@code fs.igfs.[name].colocated.writes} - specifies whether written files should be colocated on the
- *         data node to which the client is connected. If {@code true}, the file will not be distributed and will
- *         be written to a single data node. Default value is {@code true}.
- *     </li>
- *     <li>
- *         {@code fs.igfs.prefer.local.writes} - specifies whether a file should preferably be written to the
- *         local data node if it has enough free space. It may be redistributed across nodes later, though.
- *     </li>
- * </ul>
- * Where {@code [name]} is the file system endpoint specified in the authority part of the file system URI. E.g. if
- * your file system URI is {@code igfs://127.0.0.1:10500}, then {@code name} will be {@code 127.0.0.1:10500}.
- * <p>
- * Sample configuration that can be placed to {@code core-site.xml} file:
- * <pre name="code" class="xml">
- *     &lt;property&gt;
- *         &lt;name&gt;fs.igfs.127.0.0.1:10500.log.enabled&lt;/name&gt;
- *         &lt;value&gt;true&lt;/value&gt;
- *     &lt;/property&gt;
- *     &lt;property&gt;
- *         &lt;name&gt;fs.igfs.127.0.0.1:10500.log.dir&lt;/name&gt;
- *         &lt;value&gt;/home/apache/ignite/log/sampling&lt;/value&gt;
- *     &lt;/property&gt;
- *     &lt;property&gt;
- *         &lt;name&gt;fs.igfs.127.0.0.1:10500.log.batch_size&lt;/name&gt;
- *         &lt;value&gt;16&lt;/value&gt;
- *     &lt;/property&gt;
- * </pre>
- * Parameters can also be specified per map-reduce job, e.g.
- * <pre name="code" class="bash">
- * hadoop jar myjarfile.jar MyMapReduceJob -Dfs.igfs.open.sequential_reads_before_prefetch=4
- * </pre>
- * If you want to use these parameters in code, you have to substitute your file system name into the template. The
- * easiest way to do that is {@code String.format(PARAM_IGFS_COLOCATED_WRITES, [name])}.
- */
-public class HadoopParameters {
-    /** Parameter name for control over file colocation write mode. */
-    public static final String PARAM_IGFS_COLOCATED_WRITES = "fs.igfs.%s.colocated.writes";
-
-    /** Parameter name for custom sequential reads before prefetch value. */
-    public static final String PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH =
-        "fs.igfs.%s.open.sequential_reads_before_prefetch";
-
-    /** Parameter name for client logger directory. */
-    public static final String PARAM_IGFS_LOG_DIR = "fs.igfs.%s.log.dir";
-
-    /** Parameter name for log batch size. */
-    public static final String PARAM_IGFS_LOG_BATCH_SIZE = "fs.igfs.%s.log.batch_size";
-
-    /** Parameter name for log enabled flag. */
-    public static final String PARAM_IGFS_LOG_ENABLED = "fs.igfs.%s.log.enabled";
-
-    /** Parameter name for prefer local writes flag. */
-    public static final String PARAM_IGFS_PREFER_LOCAL_WRITES = "fs.igfs.prefer.local.writes";
-}
\ No newline at end of file
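
Following the substitution hint in the Javadoc above, a sketch of building the per-endpoint keys in code (the endpoint value mirrors the XML example above; the setup class itself is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters;

    public class IgfsParamsSketch {
        public static void main(String[] args) {
            Configuration cfg = new Configuration();

            String name = "127.0.0.1:10500"; // Authority part of the igfs:// URI.

            // Per-endpoint parameters: substitute the endpoint name into the templates.
            cfg.setBoolean(String.format(HadoopParameters.PARAM_IGFS_COLOCATED_WRITES, name), true);
            cfg.setInt(String.format(HadoopParameters.PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, name), 4);
            cfg.setBoolean(String.format(HadoopParameters.PARAM_IGFS_LOG_ENABLED, name), true);
            cfg.set(String.format(HadoopParameters.PARAM_IGFS_LOG_DIR, name), "/home/apache/ignite/log/sampling");
            cfg.setInt(String.format(HadoopParameters.PARAM_IGFS_LOG_BATCH_SIZE, name), 16);

            // This flag is global, not per endpoint.
            cfg.setBoolean(HadoopParameters.PARAM_IGFS_PREFER_LOCAL_WRITES, true);
        }
    }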

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopRawLocalFileSystem.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopRawLocalFileSystem.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopRawLocalFileSystem.java
deleted file mode 100644
index b8fc8e7..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopRawLocalFileSystem.java
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.fs;
-
-import java.io.BufferedOutputStream;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.RandomAccessFile;
-import java.net.URI;
-import java.nio.file.Files;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsConstants;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PositionedReadable;
-import org.apache.hadoop.fs.Seekable;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.util.Progressable;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-/**
- * Local file system implementation for Hadoop.
- */
-public class HadoopRawLocalFileSystem extends FileSystem {
-    /** Working directory for each thread. */
-    private final ThreadLocal<Path> workDir = new ThreadLocal<Path>() {
-        @Override protected Path initialValue() {
-            return getInitialWorkingDirectory();
-        }
-    };
-
-    /**
-     * Converts Hadoop path to local path.
-     *
-     * @param path Hadoop path.
-     * @return Local path.
-     */
-    File convert(Path path) {
-        checkPath(path);
-
-        if (path.isAbsolute())
-            return new File(path.toUri().getPath());
-
-        return new File(getWorkingDirectory().toUri().getPath(), path.toUri().getPath());
-    }
-
-    /** {@inheritDoc} */
-    @Override public Path getHomeDirectory() {
-        return makeQualified(new Path(System.getProperty("user.home")));
-    }
-
-    /** {@inheritDoc} */
-    @Override public Path getInitialWorkingDirectory() {
-        File f = new File(System.getProperty("user.dir"));
-
-        return new Path(f.getAbsoluteFile().toURI()).makeQualified(getUri(), null);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void initialize(URI uri, Configuration conf) throws IOException {
-        super.initialize(uri, conf);
-
-        setConf(conf);
-
-        String initWorkDir = conf.get(HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP);
-
-        if (initWorkDir != null)
-            setWorkingDirectory(new Path(initWorkDir));
-    }
-
-    /** {@inheritDoc} */
-    @Override public URI getUri() {
-        return FsConstants.LOCAL_FS_URI;
-    }
-
-    /** {@inheritDoc} */
-    @Override public FSDataInputStream open(Path f, int bufferSize) throws IOException {
-        return new FSDataInputStream(new InStream(checkExists(convert(f))));
-    }
-
-    /** {@inheritDoc} */
-    @Override public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufSize,
-        short replication, long blockSize, Progressable progress) throws IOException {
-        File file = convert(f);
-
-        if (!overwrite && !file.createNewFile())
-            throw new IOException("Failed to create new file: " + f.toUri());
-
-        return out(file, false, bufSize);
-    }
-
-    /**
-     * @param file File.
-     * @param append Append flag.
-     * @return Output stream.
-     * @throws IOException If failed.
-     */
-    private FSDataOutputStream out(File file, boolean append, int bufSize) throws IOException {
-        return new FSDataOutputStream(new BufferedOutputStream(new FileOutputStream(file, append),
-            bufSize < 32 * 1024 ? 32 * 1024 : bufSize), new Statistics(getUri().getScheme()));
-    }
-
-    /** {@inheritDoc} */
-    @Override public FSDataOutputStream append(Path f, int bufSize, Progressable progress) throws IOException {
-        return out(convert(f), true, bufSize);
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean rename(Path src, Path dst) throws IOException {
-        return convert(src).renameTo(convert(dst));
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean delete(Path f, boolean recursive) throws IOException {
-        File file = convert(f);
-
-        if (file.isDirectory() && !recursive)
-            throw new IOException("Failed to remove directory in non recursive mode: " + f.toUri());
-
-        return U.delete(file);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void setWorkingDirectory(Path dir) {
-        workDir.set(fixRelativePart(dir));
-
-        checkPath(dir);
-    }
-
-    /** {@inheritDoc} */
-    @Override public Path getWorkingDirectory() {
-        return workDir.get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean mkdirs(Path f, FsPermission permission) throws IOException {
-        if(f == null)
-            throw new IllegalArgumentException("mkdirs path arg is null");
-
-        Path parent = f.getParent();
-
-        File p2f = convert(f);
-
-        if(parent != null) {
-            File parent2f = convert(parent);
-
-            if(parent2f != null && parent2f.exists() && !parent2f.isDirectory())
-                throw new FileAlreadyExistsException("Parent path is not a directory: " + parent);
-
-        }
-
-        return (parent == null || mkdirs(parent)) && (p2f.mkdir() || p2f.isDirectory());
-    }
-
-    /** {@inheritDoc} */
-    @Override public FileStatus getFileStatus(Path f) throws IOException {
-        return fileStatus(checkExists(convert(f)));
-    }
-
-    /**
-     * @return File status.
-     */
-    private FileStatus fileStatus(File file) throws IOException {
-        boolean dir = file.isDirectory();
-
-        java.nio.file.Path path = dir ? null : file.toPath();
-
-        return new FileStatus(dir ? 0 : file.length(), dir, 1, 4 * 1024, file.lastModified(), file.lastModified(),
-            /*permission*/null, /*owner*/null, /*group*/null, dir ? null : Files.isSymbolicLink(path) ?
-            new Path(Files.readSymbolicLink(path).toUri()) : null, new Path(file.toURI()));
-    }
-
-    /**
-     * @param file File.
-     * @return Same file.
-     * @throws FileNotFoundException If the file does not exist.
-     */
-    private static File checkExists(File file) throws FileNotFoundException {
-        if (!file.exists())
-            throw new FileNotFoundException("File " + file.getAbsolutePath() + " does not exist.");
-
-        return file;
-    }
-
-    /** {@inheritDoc} */
-    @Override public FileStatus[] listStatus(Path f) throws IOException {
-        File file = convert(f);
-
-        if (checkExists(file).isFile())
-            return new FileStatus[] {fileStatus(file)};
-
-        File[] files = file.listFiles();
-
-        FileStatus[] res = new FileStatus[files.length];
-
-        for (int i = 0; i < res.length; i++)
-            res[i] = fileStatus(files[i]);
-
-        return res;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean supportsSymlinks() {
-        return true;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void createSymlink(Path target, Path link, boolean createParent) throws IOException {
-        Files.createSymbolicLink(convert(link).toPath(), convert(target).toPath());
-    }
-
-    /** {@inheritDoc} */
-    @Override public FileStatus getFileLinkStatus(Path f) throws IOException {
-        return getFileStatus(getLinkTarget(f));
-    }
-
-    /** {@inheritDoc} */
-    @Override public Path getLinkTarget(Path f) throws IOException {
-        File file = Files.readSymbolicLink(convert(f).toPath()).toFile();
-
-        return new Path(file.toURI());
-    }
-
-    /**
-     * Input stream.
-     */
-    private static class InStream extends InputStream implements Seekable, PositionedReadable {
-        /** */
-        private final RandomAccessFile file;
-
-        /**
-         * @param f File.
-         * @throws IOException If failed.
-         */
-        public InStream(File f) throws IOException {
-            file = new RandomAccessFile(f, "r");
-        }
-
-        /** {@inheritDoc} */
-        @Override public synchronized int read() throws IOException {
-            return file.read();
-        }
-
-        /** {@inheritDoc} */
-        @Override public synchronized int read(byte[] b, int off, int len) throws IOException {
-            return file.read(b, off, len);
-        }
-
-        /** {@inheritDoc} */
-        @Override public synchronized void close() throws IOException {
-            file.close();
-        }
-
-        /** {@inheritDoc} */
-        @Override public synchronized int read(long pos, byte[] buf, int off, int len) throws IOException {
-            long pos0 = file.getFilePointer();
-
-            file.seek(pos);
-            int res = file.read(buf, off, len);
-
-            file.seek(pos0);
-
-            return res;
-        }
-
-        /** {@inheritDoc} */
-        @Override public void readFully(long pos, byte[] buf, int off, int len) throws IOException {
-            if (read(pos, buf, off, len) != len)
-                throw new IOException();
-        }
-
-        /** {@inheritDoc} */
-        @Override public void readFully(long pos, byte[] buf) throws IOException {
-            readFully(pos, buf, 0, buf.length);
-        }
-
-        /** {@inheritDoc} */
-        @Override public synchronized void seek(long pos) throws IOException {
-            file.seek(pos);
-        }
-
-        /** {@inheritDoc} */
-        @Override public synchronized long getPos() throws IOException {
-            return file.getFilePointer();
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean seekToNewSource(long targetPos) throws IOException {
-            return false;
-        }
-    }
-}
\ No newline at end of file
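
A sketch of the per-thread working directory behavior above (paths are illustrative; note that only the thread calling initialize() picks up the configured directory, while other threads start from user.dir):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsConstants;
    import org.apache.hadoop.fs.Path;
    import org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemsUtils;
    import org.apache.ignite.internal.processors.hadoop.fs.HadoopRawLocalFileSystem;

    public class WorkDirSketch {
        public static void main(String[] args) throws Exception {
            Configuration cfg = new Configuration();
            cfg.set(HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP, "/tmp/task-a");

            final HadoopRawLocalFileSystem fs = new HadoopRawLocalFileSystem();
            fs.initialize(FsConstants.LOCAL_FS_URI, cfg); // Sets /tmp/task-a for this thread.

            Thread other = new Thread(new Runnable() {
                @Override public void run() {
                    // workDir is a ThreadLocal: this thread can switch directories
                    // without disturbing the main thread.
                    fs.setWorkingDirectory(new Path("/tmp/task-b"));
                    System.out.println(fs.getWorkingDirectory()); // .../task-b
                }
            });

            other.start();
            other.join();

            System.out.println(fs.getWorkingDirectory()); // Still .../task-a.
        }
    }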

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfs.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfs.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfs.java
deleted file mode 100644
index fe43596..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfs.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.igfs;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Map;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.igfs.IgfsBlockLocation;
-import org.apache.ignite.igfs.IgfsFile;
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.igfs.IgfsPathSummary;
-import org.apache.ignite.internal.processors.igfs.IgfsHandshakeResponse;
-import org.apache.ignite.internal.processors.igfs.IgfsStatus;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Facade for communication with the grid.
- */
-public interface HadoopIgfs {
-    /**
-     * Perform handshake.
-     *
-     * @param logDir Log directory.
-     * @return Future with handshake result.
-     * @throws IgniteCheckedException If failed.
-     */
-    public IgfsHandshakeResponse handshake(String logDir) throws IgniteCheckedException, IOException;
-
-    /**
-     * Close connection.
-     *
-     * @param force Force flag.
-     */
-    public void close(boolean force);
-
-    /**
-     * Command to retrieve file info for some IGFS path.
-     *
-     * @param path Path to get file info for.
-     * @return File info.
-     * @throws IgniteCheckedException If failed.
-     */
-    public IgfsFile info(IgfsPath path) throws IgniteCheckedException, IOException;
-
-    /**
-     * Command to update file properties.
-     *
-     * @param path IGFS path to update properties.
-     * @param props Properties to update.
-     * @return Updated file info.
-     * @throws IgniteCheckedException If failed.
-     */
-    public IgfsFile update(IgfsPath path, Map<String, String> props) throws IgniteCheckedException, IOException;
-
-    /**
-     * Sets last access time and last modification time for a file.
-     *
-     * @param path Path to update times.
-     * @param accessTime Last access time to set.
-     * @param modificationTime Last modification time to set.
-     * @throws IgniteCheckedException If failed.
-     */
-    public Boolean setTimes(IgfsPath path, long accessTime, long modificationTime) throws IgniteCheckedException,
-        IOException;
-
-    /**
-     * Command to rename given path.
-     *
-     * @param src Source path.
-     * @param dest Destination path.
-     * @return {@code True} if renamed.
-     * @throws IgniteCheckedException If failed.
-     */
-    public Boolean rename(IgfsPath src, IgfsPath dest) throws IgniteCheckedException, IOException;
-
-    /**
-     * Command to delete given path.
-     *
-     * @param path Path to delete.
-     * @param recursive {@code True} if deletion is recursive.
-     * @return {@code True} if deleted.
-     * @throws IgniteCheckedException If failed.
-     */
-    public Boolean delete(IgfsPath path, boolean recursive) throws IgniteCheckedException, IOException;
-
-    /**
-     * Command to get affinity for given path, offset and length.
-     *
-     * @param path Path to get affinity for.
-     * @param start Start position (offset).
-     * @param len Data length.
-     * @return Affinity block locations.
-     * @throws IgniteCheckedException If failed.
-     */
-    public Collection<IgfsBlockLocation> affinity(IgfsPath path, long start, long len) throws IgniteCheckedException,
-        IOException;
-
-    /**
-     * Gets path summary.
-     *
-     * @param path Path to get summary for.
-     * @return Path summary.
-     * @throws IgniteCheckedException If failed.
-     */
-    public IgfsPathSummary contentSummary(IgfsPath path) throws IgniteCheckedException, IOException;
-
-    /**
-     * Command to create directories.
-     *
-     * @param path Path to create.
-     * @param props Properties for created directories.
-     * @return {@code True} if created.
-     * @throws IgniteCheckedException If failed.
-     */
-    public Boolean mkdirs(IgfsPath path, Map<String, String> props) throws IgniteCheckedException, IOException;
-
-    /**
-     * Command to get list of files in directory.
-     *
-     * @param path Path to list.
-     * @return Files in the directory.
-     * @throws IgniteCheckedException If failed.
-     */
-    public Collection<IgfsFile> listFiles(IgfsPath path) throws IgniteCheckedException, IOException;
-
-    /**
-     * Command to get directory listing.
-     *
-     * @param path Path to list.
-     * @return Paths in the directory.
-     * @throws IgniteCheckedException If failed.
-     */
-    public Collection<IgfsPath> listPaths(IgfsPath path) throws IgniteCheckedException, IOException;
-
-    /**
-     * Performs status request.
-     *
-     * @return Status response.
-     * @throws IgniteCheckedException If failed.
-     */
-    public IgfsStatus fsStatus() throws IgniteCheckedException, IOException;
-
-    /**
-     * Command to open file for reading.
-     *
-     * @param path File path to open.
-     * @return Stream delegate.
-     * @throws IgniteCheckedException If failed.
-     */
-    public HadoopIgfsStreamDelegate open(IgfsPath path) throws IgniteCheckedException, IOException;
-
-    /**
-     * Command to open file for reading.
-     *
-     * @param path File path to open.
-     * @param seqReadsBeforePrefetch Number of sequential reads before prefetch is started.
-     * @return Stream delegate.
-     * @throws IgniteCheckedException If failed.
-     */
-    public HadoopIgfsStreamDelegate open(IgfsPath path, int seqReadsBeforePrefetch) throws IgniteCheckedException,
-        IOException;
-
-    /**
-     * Command to create file and open it for output.
-     *
-     * @param path Path to file.
-     * @param overwrite If {@code true} then old file contents will be lost.
-     * @param colocate If {@code true} and called on data node, file will be written on that node.
-     * @param replication Replication factor.
-     * @param blockSize Block size.
-     * @param props File properties for creation.
-     * @return Stream descriptor.
-     * @throws IgniteCheckedException If failed.
-     */
-    public HadoopIgfsStreamDelegate create(IgfsPath path, boolean overwrite, boolean colocate,
-        int replication, long blockSize, @Nullable Map<String, String> props) throws IgniteCheckedException, IOException;
-
-    /**
-     * Open file for output appending data to the end of a file.
-     *
-     * @param path Path to file.
-     * @param create If {@code true}, file will be created if does not exist.
-     * @param props File properties.
-     * @return Stream descriptor.
-     * @throws IgniteCheckedException If failed.
-     */
-    public HadoopIgfsStreamDelegate append(IgfsPath path, boolean create,
-        @Nullable Map<String, String> props) throws IgniteCheckedException, IOException;
-}
\ No newline at end of file
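
For context, a sketch of a caller driving this facade (any concrete implementation could back it; the paths and log directory are illustrative, not taken from this commit):

    import org.apache.ignite.igfs.IgfsFile;
    import org.apache.ignite.igfs.IgfsPath;
    import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfs;
    import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsStreamDelegate;
    import org.apache.ignite.internal.processors.igfs.IgfsHandshakeResponse;

    public class IgfsFacadeSketch {
        static void probe(HadoopIgfs igfs) throws Exception {
            // Handshake first: establishes the session with the grid.
            IgfsHandshakeResponse res = igfs.handshake("/var/log/igfs-sampling");

            IgfsPath path = new IgfsPath("/user/data/input.txt");

            IgfsFile info = igfs.info(path);

            if (info != null && info.isFile()) {
                // Open the file; reads then go through HadoopIgfsEx.readData() using the delegate.
                HadoopIgfsStreamDelegate in = igfs.open(path);
            }

            igfs.close(false); // Graceful close.
        }
    }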

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsCommunicationException.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsCommunicationException.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsCommunicationException.java
deleted file mode 100644
index d610091..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsCommunicationException.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.igfs;
-
-import org.apache.ignite.IgniteCheckedException;
-
-/**
- * Communication exception indicating a problem between the file system and the IGFS instance.
- */
-public class HadoopIgfsCommunicationException extends IgniteCheckedException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Creates new exception with given throwable as a nested cause and
-     * source of error message.
-     *
-     * @param cause Non-null throwable cause.
-     */
-    public HadoopIgfsCommunicationException(Exception cause) {
-        super(cause);
-    }
-
-    /**
-     * Creates a new exception with the given error message.
-     *
-     * @param msg Error message.
-     */
-    public HadoopIgfsCommunicationException(String msg) {
-        super(msg);
-    }
-
-    /**
-     * Creates a new exception with the given error message and nested cause exception.
-     *
-     * @param msg Error message.
-     * @param cause Cause.
-     */
-    public HadoopIgfsCommunicationException(String msg, Exception cause) {
-        super(msg, cause);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsEx.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsEx.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsEx.java
deleted file mode 100644
index 014e2a1..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsEx.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.igfs;
-
-import java.io.IOException;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Extended IGFS server interface.
- */
-public interface HadoopIgfsEx extends HadoopIgfs {
-    /**
-     * Adds an event listener that will be invoked when the connection with the server is lost or a remote error occurs.
-     * If the connection is already closed, the callback is invoked synchronously inside this method.
-     *
-     * @param delegate Stream delegate.
-     * @param lsnr Event listener.
-     */
-    public void addEventListener(HadoopIgfsStreamDelegate delegate, HadoopIgfsStreamEventListener lsnr);
-
-    /**
-     * Removes an event listener previously registered for connection-loss and remote-error notifications.
-     *
-     * @param delegate Stream delegate.
-     */
-    public void removeEventListener(HadoopIgfsStreamDelegate delegate);
-
-    /**
-     * Asynchronously reads the specified number of bytes from an opened input stream.
-     *
-     * @param delegate Stream delegate.
-     * @param pos Position to read from.
-     * @param len Data length to read.
-     * @param outBuf Optional output buffer. If the buffer length is less than {@code len}, all remaining
-     *     bytes will be read into a newly allocated buffer of length {len - outBuf.length}, and that buffer will
-     *     be the result of the read future.
-     * @param outOff Output offset.
-     * @param outLen Output length.
-     * @return Read data.
-     */
-    public IgniteInternalFuture<byte[]> readData(HadoopIgfsStreamDelegate delegate, long pos, int len,
-        @Nullable final byte[] outBuf, final int outOff, final int outLen);
-
-    /**
-     * Writes data to the stream referenced by the given delegate. This method does not return a future since
-     * no response to the write request is sent.
-     *
-     * @param delegate Stream delegate.
-     * @param data Data to write.
-     * @param off Offset.
-     * @param len Length.
-     * @throws IOException If failed.
-     */
-    public void writeData(HadoopIgfsStreamDelegate delegate, byte[] data, int off, int len) throws IOException;
-
-    /**
-     * Close server stream.
-     *
-     * @param delegate Stream delegate.
-     * @throws IOException If failed.
-     */
-    public void closeStream(HadoopIgfsStreamDelegate delegate) throws IOException;
-
-    /**
-     * Flush output stream.
-     *
-     * @param delegate Stream delegate.
-     * @throws IOException If failed.
-     */
-    public void flush(HadoopIgfsStreamDelegate delegate) throws IOException;
-
-    /**
-     * The user this IGFS instance works on behalf of.
-     * @return The user name.
-     */
-    public String user();
-}
\ No newline at end of file
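
A sketch of the readData() buffer contract described above (assumes an already opened stream delegate; the 64 KB size is illustrative):

    import org.apache.ignite.internal.IgniteInternalFuture;
    import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsEx;
    import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsStreamDelegate;

    public class ReadDataSketch {
        static byte[] readChunk(HadoopIgfsEx ex, HadoopIgfsStreamDelegate in) throws Exception {
            byte[] buf = new byte[64 * 1024];

            // Ask for buf.length bytes at offset 0; since outBuf covers the whole
            // read, no extra buffer needs to be allocated by the implementation.
            IgniteInternalFuture<byte[]> fut = ex.readData(in, 0, buf.length, buf, 0, buf.length);

            byte[] res = fut.get(); // Blocks until the data has arrived.

            ex.closeStream(in); // Release the server-side stream.

            return res;
        }
    }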

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsFuture.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsFuture.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsFuture.java
deleted file mode 100644
index 5ff1b2e..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsFuture.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.igfs;
-
-import org.apache.ignite.internal.util.future.GridFutureAdapter;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * IGFS client future that holds response parse closure.
- */
-public class HadoopIgfsFuture<T> extends GridFutureAdapter<T> {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** Output buffer. */
-    private byte[] outBuf;
-
-    /** Output offset. */
-    private int outOff;
-
-    /** Output length. */
-    private int outLen;
-
-    /** Read future flag. */
-    private boolean read;
-
-    /**
-     * @return Output buffer.
-     */
-    public byte[] outputBuffer() {
-        return outBuf;
-    }
-
-    /**
-     * @param outBuf Output buffer.
-     */
-    public void outputBuffer(@Nullable byte[] outBuf) {
-        this.outBuf = outBuf;
-    }
-
-    /**
-     * @return Offset in output buffer to write from.
-     */
-    public int outputOffset() {
-        return outOff;
-    }
-
-    /**
-     * @param outOff Offset in output buffer to write from.
-     */
-    public void outputOffset(int outOff) {
-        this.outOff = outOff;
-    }
-
-    /**
-     * @return Length to write to output buffer.
-     */
-    public int outputLength() {
-        return outLen;
-    }
-
-    /**
-     * @param outLen Length to write to output buffer.
-     */
-    public void outputLength(int outLen) {
-        this.outLen = outLen;
-    }
-
-    /**
-     * @param read {@code True} if this is a read future.
-     */
-    public void read(boolean read) {
-        this.read = read;
-    }
-
-    /**
-     * @return {@code True} if this is a read future.
-     */
-    public boolean read() {
-        return read;
-    }
-}
\ No newline at end of file

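The setters above let a response handler copy the head of a read reply straight into the caller's buffer, with only the overflow surfacing as the future's byte[] result. A hedged sketch of how such a future might be prepared (dispatch and completion are transport-specific and omitted; outBuf, outOff and outLen are assumed to exist):

    HadoopIgfsFuture<byte[]> fut = new HadoopIgfsFuture<>();

    fut.read(true);           // Mark as a read future.
    fut.outputBuffer(outBuf); // Head of the reply is copied here...
    fut.outputOffset(outOff); // ...starting at this offset...
    fut.outputLength(outLen); // ...for at most this many bytes.

    // The transport later completes the future with any remainder
    // that did not fit into the output buffer.
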
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInProc.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInProc.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInProc.java
deleted file mode 100644
index 3220538..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInProc.java
+++ /dev/null
@@ -1,510 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.igfs;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import org.apache.commons.logging.Log;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.igfs.IgfsBlockLocation;
-import org.apache.ignite.igfs.IgfsFile;
-import org.apache.ignite.igfs.IgfsInputStream;
-import org.apache.ignite.igfs.IgfsOutputStream;
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.igfs.IgfsPathSummary;
-import org.apache.ignite.igfs.IgfsUserContext;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.processors.igfs.IgfsEx;
-import org.apache.ignite.internal.processors.igfs.IgfsHandshakeResponse;
-import org.apache.ignite.internal.processors.igfs.IgfsStatus;
-import org.apache.ignite.internal.processors.igfs.IgfsUtils;
-import org.apache.ignite.internal.util.future.GridFinishedFuture;
-import org.apache.ignite.lang.IgniteOutClosure;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Communication with grid in the same process.
- */
-public class HadoopIgfsInProc implements HadoopIgfsEx {
-    /** Target IGFS. */
-    private final IgfsEx igfs;
-
-    /** Buffer size. */
-    private final int bufSize;
-
-    /** Event listeners. */
-    private final Map<HadoopIgfsStreamDelegate, HadoopIgfsStreamEventListener> lsnrs =
-        new ConcurrentHashMap<>();
-
-    /** Logger. */
-    private final Log log;
-
-    /** The user this IGFS works on behalf of. */
-    private final String user;
-
-    /**
-     * Constructor.
-     *
-     * @param igfs Target IGFS.
-     * @param log Log.
-     * @param userName User name.
-     * @throws IgniteCheckedException If failed.
-     */
-    public HadoopIgfsInProc(IgfsEx igfs, Log log, String userName) throws IgniteCheckedException {
-        this.user = IgfsUtils.fixUserName(userName);
-
-        this.igfs = igfs;
-
-        this.log = log;
-
-        bufSize = igfs.configuration().getBlockSize() * 2;
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsHandshakeResponse handshake(final String logDir) {
-        return IgfsUserContext.doAs(user, new IgniteOutClosure<IgfsHandshakeResponse>() {
-            @Override public IgfsHandshakeResponse apply() {
-                igfs.clientLogDirectory(logDir);
-
-                return new IgfsHandshakeResponse(igfs.name(), igfs.proxyPaths(), igfs.groupBlockSize(),
-                    igfs.globalSampling());
-            }
-        });
-    }
-
-    /** {@inheritDoc} */
-    @Override public void close(boolean force) {
-        // Perform cleanup.
-        for (HadoopIgfsStreamEventListener lsnr : lsnrs.values()) {
-            try {
-                lsnr.onClose();
-            }
-            catch (IgniteCheckedException e) {
-                if (log.isDebugEnabled())
-                    log.debug("Failed to notify stream event listener", e);
-            }
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsFile info(final IgfsPath path) throws IgniteCheckedException {
-        try {
-            return IgfsUserContext.doAs(user, new IgniteOutClosure<IgfsFile>() {
-                @Override public IgfsFile apply() {
-                    return igfs.info(path);
-                }
-            });
-        }
-        catch (IgniteException e) {
-            throw new IgniteCheckedException(e);
-        }
-        catch (IllegalStateException e) {
-            throw new HadoopIgfsCommunicationException("Failed to get file info because Grid is stopping: " + path);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsFile update(final IgfsPath path, final Map<String, String> props) throws IgniteCheckedException {
-        try {
-            return IgfsUserContext.doAs(user, new IgniteOutClosure<IgfsFile>() {
-                @Override public IgfsFile apply() {
-                    return igfs.update(path, props);
-                }
-            });
-        }
-        catch (IgniteException e) {
-            throw new IgniteCheckedException(e);
-        }
-        catch (IllegalStateException e) {
-            throw new HadoopIgfsCommunicationException("Failed to update file because Grid is stopping: " + path);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public Boolean setTimes(final IgfsPath path, final long accessTime, final long modificationTime)
-        throws IgniteCheckedException {
-        try {
-            IgfsUserContext.doAs(user, new IgniteOutClosure<Void>() {
-                @Override public Void apply() {
-                    igfs.setTimes(path, accessTime, modificationTime);
-
-                    return null;
-                }
-            });
-
-            return true;
-        }
-        catch (IgniteException e) {
-            throw new IgniteCheckedException(e);
-        }
-        catch (IllegalStateException e) {
-            throw new HadoopIgfsCommunicationException("Failed to set path times because Grid is stopping: " +
-                path);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public Boolean rename(final IgfsPath src, final IgfsPath dest) throws IgniteCheckedException {
-        try {
-            IgfsUserContext.doAs(user, new IgniteOutClosure<Void>() {
-                @Override public Void apply() {
-                    igfs.rename(src, dest);
-
-                    return null;
-                }
-            });
-
-            return true;
-        }
-        catch (IgniteException e) {
-            throw new IgniteCheckedException(e);
-        }
-        catch (IllegalStateException e) {
-            throw new HadoopIgfsCommunicationException("Failed to rename path because Grid is stopping: " + src);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public Boolean delete(final IgfsPath path, final boolean recursive) throws IgniteCheckedException {
-        try {
-            return IgfsUserContext.doAs(user, new IgniteOutClosure<Boolean>() {
-                @Override public Boolean apply() {
-                    return igfs.delete(path, recursive);
-                }
-            });
-        }
-        catch (IgniteException e) {
-            throw new IgniteCheckedException(e);
-        }
-        catch (IllegalStateException e) {
-            throw new HadoopIgfsCommunicationException("Failed to delete path because Grid is stopping: " + path);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsStatus fsStatus() throws IgniteCheckedException {
-        try {
-            return IgfsUserContext.doAs(user, new Callable<IgfsStatus>() {
-                @Override public IgfsStatus call() throws IgniteCheckedException {
-                    return igfs.globalSpace();
-                }
-            });
-        }
-        catch (IllegalStateException e) {
-            throw new HadoopIgfsCommunicationException("Failed to get file system status because Grid is " +
-                "stopping.");
-        }
-        catch (IgniteCheckedException | RuntimeException | Error e) {
-            throw e;
-        }
-        catch (Exception e) {
-            throw new AssertionError("Should never be reached.", e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public Collection<IgfsPath> listPaths(final IgfsPath path) throws IgniteCheckedException {
-        try {
-            return IgfsUserContext.doAs(user, new IgniteOutClosure<Collection<IgfsPath>>() {
-                @Override public Collection<IgfsPath> apply() {
-                    return igfs.listPaths(path);
-                }
-            });
-        }
-        catch (IgniteException e) {
-            throw new IgniteCheckedException(e);
-        }
-        catch (IllegalStateException e) {
-            throw new HadoopIgfsCommunicationException("Failed to list paths because Grid is stopping: " + path);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public Collection<IgfsFile> listFiles(final IgfsPath path) throws IgniteCheckedException {
-        try {
-            return IgfsUserContext.doAs(user, new IgniteOutClosure<Collection<IgfsFile>>() {
-                @Override public Collection<IgfsFile> apply() {
-                    return igfs.listFiles(path);
-                }
-            });
-        }
-        catch (IgniteException e) {
-            throw new IgniteCheckedException(e);
-        }
-        catch (IllegalStateException e) {
-            throw new HadoopIgfsCommunicationException("Failed to list files because Grid is stopping: " + path);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public Boolean mkdirs(final IgfsPath path, final Map<String, String> props) throws IgniteCheckedException {
-        try {
-            IgfsUserContext.doAs(user, new IgniteOutClosure<Void>() {
-                @Override public Void apply() {
-                    igfs.mkdirs(path, props);
-
-                    return null;
-                }
-            });
-
-            return true;
-        }
-        catch (IgniteException e) {
-            throw new IgniteCheckedException(e);
-        }
-        catch (IllegalStateException e) {
-            throw new HadoopIgfsCommunicationException("Failed to create directory because Grid is stopping: " +
-                path);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsPathSummary contentSummary(final IgfsPath path) throws IgniteCheckedException {
-        try {
-            return IgfsUserContext.doAs(user, new IgniteOutClosure<IgfsPathSummary>() {
-                @Override public IgfsPathSummary apply() {
-                    return igfs.summary(path);
-                }
-            });
-        }
-        catch (IgniteException e) {
-            throw new IgniteCheckedException(e);
-        }
-        catch (IllegalStateException e) {
-            throw new HadoopIgfsCommunicationException("Failed to get content summary because Grid is stopping: " +
-                path);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public Collection<IgfsBlockLocation> affinity(final IgfsPath path, final long start, final long len)
-        throws IgniteCheckedException {
-        try {
-            return IgfsUserContext.doAs(user, new IgniteOutClosure<Collection<IgfsBlockLocation>>() {
-                @Override public Collection<IgfsBlockLocation> apply() {
-                    return igfs.affinity(path, start, len);
-                }
-            });
-        }
-        catch (IgniteException e) {
-            throw new IgniteCheckedException(e);
-        }
-        catch (IllegalStateException e) {
-            throw new HadoopIgfsCommunicationException("Failed to get affinity because Grid is stopping: " + path);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopIgfsStreamDelegate open(final IgfsPath path) throws IgniteCheckedException {
-        try {
-            return IgfsUserContext.doAs(user, new IgniteOutClosure<HadoopIgfsStreamDelegate>() {
-                @Override public HadoopIgfsStreamDelegate apply() {
-                    IgfsInputStream stream = igfs.open(path, bufSize);
-
-                    return new HadoopIgfsStreamDelegate(HadoopIgfsInProc.this, stream, stream.length());
-                }
-            });
-        }
-        catch (IgniteException e) {
-            throw new IgniteCheckedException(e);
-        }
-        catch (IllegalStateException e) {
-            throw new HadoopIgfsCommunicationException("Failed to open file because Grid is stopping: " + path);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopIgfsStreamDelegate open(final IgfsPath path, final int seqReadsBeforePrefetch)
-        throws IgniteCheckedException {
-        try {
-            return IgfsUserContext.doAs(user, new IgniteOutClosure<HadoopIgfsStreamDelegate>() {
-                @Override public HadoopIgfsStreamDelegate apply() {
-                    IgfsInputStream stream = igfs.open(path, bufSize, seqReadsBeforePrefetch);
-
-                    return new HadoopIgfsStreamDelegate(HadoopIgfsInProc.this, stream, stream.length());
-                }
-            });
-        }
-        catch (IgniteException e) {
-            throw new IgniteCheckedException(e);
-        }
-        catch (IllegalStateException e) {
-            throw new HadoopIgfsCommunicationException("Failed to open file because Grid is stopping: " + path);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopIgfsStreamDelegate create(final IgfsPath path, final boolean overwrite, final boolean colocate,
-        final int replication, final long blockSize, final @Nullable Map<String, String> props) throws IgniteCheckedException {
-        try {
-            return IgfsUserContext.doAs(user, new IgniteOutClosure<HadoopIgfsStreamDelegate>() {
-                @Override public HadoopIgfsStreamDelegate apply() {
-                    IgfsOutputStream stream = igfs.create(path, bufSize, overwrite,
-                        colocate ? igfs.nextAffinityKey() : null, replication, blockSize, props);
-
-                    return new HadoopIgfsStreamDelegate(HadoopIgfsInProc.this, stream);
-                }
-            });
-        }
-        catch (IgniteException e) {
-            throw new IgniteCheckedException(e);
-        }
-        catch (IllegalStateException e) {
-            throw new HadoopIgfsCommunicationException("Failed to create file because Grid is stopping: " + path);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopIgfsStreamDelegate append(final IgfsPath path, final boolean create,
-        final @Nullable Map<String, String> props) throws IgniteCheckedException {
-        try {
-            return IgfsUserContext.doAs(user, new IgniteOutClosure<HadoopIgfsStreamDelegate>() {
-                @Override public HadoopIgfsStreamDelegate apply() {
-                    IgfsOutputStream stream = igfs.append(path, bufSize, create, props);
-
-                    return new HadoopIgfsStreamDelegate(HadoopIgfsInProc.this, stream);
-                }
-            });
-        }
-        catch (IgniteException e) {
-            throw new IgniteCheckedException(e);
-        }
-        catch (IllegalStateException e) {
-            throw new HadoopIgfsCommunicationException("Failed to append file because Grid is stopping: " + path);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgniteInternalFuture<byte[]> readData(HadoopIgfsStreamDelegate delegate, long pos, int len,
-        @Nullable byte[] outBuf, int outOff, int outLen) {
-        IgfsInputStream stream = delegate.target();
-
-        try {
-            byte[] res = null;
-
-            if (outBuf != null) {
-                int outTailLen = outBuf.length - outOff;
-
-                if (len <= outTailLen)
-                    stream.readFully(pos, outBuf, outOff, len);
-                else {
-                    stream.readFully(pos, outBuf, outOff, outTailLen);
-
-                    int remainderLen = len - outTailLen;
-
-                    res = new byte[remainderLen];
-
-                    // The remainder starts right after the bytes already copied into the output buffer.
-                    stream.readFully(pos + outTailLen, res, 0, remainderLen);
-                }
-            }
-            else {
-                res = new byte[len];
-
-                stream.readFully(pos, res, 0, len);
-            }
-
-            return new GridFinishedFuture<>(res);
-        }
-        catch (IllegalStateException | IOException e) {
-            HadoopIgfsStreamEventListener lsnr = lsnrs.get(delegate);
-
-            if (lsnr != null)
-                lsnr.onError(e.getMessage());
-
-            return new GridFinishedFuture<>(e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeData(HadoopIgfsStreamDelegate delegate, byte[] data, int off, int len)
-        throws IOException {
-        try {
-            IgfsOutputStream stream = delegate.target();
-
-            stream.write(data, off, len);
-        }
-        catch (IllegalStateException | IOException e) {
-            HadoopIgfsStreamEventListener lsnr = lsnrs.get(delegate);
-
-            if (lsnr != null)
-                lsnr.onError(e.getMessage());
-
-            if (e instanceof IllegalStateException)
-                throw new IOException("Failed to write data to IGFS stream because Grid is stopping.", e);
-            else
-                throw e;
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void flush(HadoopIgfsStreamDelegate delegate) throws IOException {
-        try {
-            IgfsOutputStream stream = delegate.target();
-
-            stream.flush();
-        }
-        catch (IllegalStateException | IOException e) {
-            HadoopIgfsStreamEventListener lsnr = lsnrs.get(delegate);
-
-            if (lsnr != null)
-                lsnr.onError(e.getMessage());
-
-            if (e instanceof IllegalStateException)
-                throw new IOException("Failed to flush data to IGFS stream because Grid is stopping.", e);
-            else
-                throw e;
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void closeStream(HadoopIgfsStreamDelegate desc) throws IOException {
-        Closeable closeable = desc.target();
-
-        try {
-            closeable.close();
-        }
-        catch (IllegalStateException e) {
-            throw new IOException("Failed to close IGFS stream because Grid is stopping.", e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void addEventListener(HadoopIgfsStreamDelegate delegate,
-        HadoopIgfsStreamEventListener lsnr) {
-        HadoopIgfsStreamEventListener lsnr0 = lsnrs.put(delegate, lsnr);
-
-        assert lsnr0 == null || lsnr0 == lsnr;
-
-        if (log.isDebugEnabled())
-            log.debug("Added stream event listener [delegate=" + delegate + ']');
-    }
-
-    /** {@inheritDoc} */
-    @Override public void removeEventListener(HadoopIgfsStreamDelegate delegate) {
-        HadoopIgfsStreamEventListener lsnr0 = lsnrs.remove(delegate);
-
-        if (lsnr0 != null && log.isDebugEnabled())
-            log.debug("Removed stream event listener [delegate=" + delegate + ']');
-    }
-
-    /** {@inheritDoc} */
-    @Override public String user() {
-        return user;
-    }
-}
\ No newline at end of file

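Every metadata operation in the class above follows one template: run the closure via IgfsUserContext.doAs(), wrap IgniteException into IgniteCheckedException, and translate IllegalStateException (grid is stopping) into HadoopIgfsCommunicationException. A sketch of that template factored into a single helper; the helper name is hypothetical and not part of the class:

    private <T> T doAsUser(IgniteOutClosure<T> op, IgfsPath path) throws IgniteCheckedException {
        try {
            return IgfsUserContext.doAs(user, op);
        }
        catch (IgniteException e) {
            throw new IgniteCheckedException(e);
        }
        catch (IllegalStateException e) {
            throw new HadoopIgfsCommunicationException("Operation failed because Grid is stopping: " + path);
        }
    }
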
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInputStream.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInputStream.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInputStream.java
deleted file mode 100644
index 46b46d7..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInputStream.java
+++ /dev/null
@@ -1,629 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.igfs;
-
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStream;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.fs.PositionedReadable;
-import org.apache.hadoop.fs.Seekable;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.igfs.common.IgfsLogger;
-import org.apache.ignite.internal.util.typedef.internal.A;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.jetbrains.annotations.NotNull;
-
-/**
- * IGFS input stream wrapper for hadoop interfaces.
- */
-@SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
-public final class HadoopIgfsInputStream extends InputStream implements Seekable, PositionedReadable,
-    HadoopIgfsStreamEventListener {
-    /** Minimum buffer size. */
-    private static final int MIN_BUF_SIZE = 4 * 1024;
-
-    /** Server stream delegate. */
-    private HadoopIgfsStreamDelegate delegate;
-
-    /** Stream ID used by logger. */
-    private long logStreamId;
-
-    /** Stream position. */
-    private long pos;
-
-    /** Stream read limit. */
-    private long limit;
-
-    /** Mark position. */
-    private long markPos = -1;
-
-    /** Prefetch buffer. */
-    private DoubleFetchBuffer buf = new DoubleFetchBuffer();
-
-    /** Buffer half size for double-buffering. */
-    private int bufHalfSize;
-
-    /** Closed flag. */
-    private volatile boolean closed;
-
-    /** Flag set if stream was closed due to connection breakage. */
-    private boolean connBroken;
-
-    /** Logger. */
-    private Log log;
-
-    /** Client logger. */
-    private IgfsLogger clientLog;
-
-    /** Read time. */
-    private long readTime;
-
-    /** User time. */
-    private long userTime;
-
-    /** Last timestamp. */
-    private long lastTs;
-
-    /** Amount of read bytes. */
-    private long total;
-
-    /**
-     * Creates input stream.
-     *
-     * @param delegate Server stream delegate.
-     * @param limit Read limit.
-     * @param bufSize Buffer size.
-     * @param log Log.
-     * @param clientLog Client logger.
-     * @param logStreamId Stream ID used by logger.
-     */
-    public HadoopIgfsInputStream(HadoopIgfsStreamDelegate delegate, long limit, int bufSize, Log log,
-        IgfsLogger clientLog, long logStreamId) {
-        assert limit >= 0;
-
-        this.delegate = delegate;
-        this.limit = limit;
-        this.log = log;
-        this.clientLog = clientLog;
-        this.logStreamId = logStreamId;
-
-        bufHalfSize = Math.max(bufSize, MIN_BUF_SIZE);
-
-        lastTs = System.nanoTime();
-
-        delegate.hadoop().addEventListener(delegate, this);
-    }
-
-    /**
-     * Read start.
-     */
-    private void readStart() {
-        long now = System.nanoTime();
-
-        userTime += now - lastTs;
-
-        lastTs = now;
-    }
-
-    /**
-     * Read end.
-     */
-    private void readEnd() {
-        long now = System.nanoTime();
-
-        readTime += now - lastTs;
-
-        lastTs = now;
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized int read() throws IOException {
-        checkClosed();
-
-        readStart();
-
-        try {
-            if (eof())
-                return -1;
-
-            buf.refreshAhead(pos);
-
-            int res = buf.atPosition(pos);
-
-            pos++;
-            total++;
-
-            buf.refreshAhead(pos);
-
-            return res;
-        }
-        catch (IgniteCheckedException e) {
-            throw HadoopIgfsUtils.cast(e);
-        }
-        finally {
-            readEnd();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized int read(@NotNull byte[] b, int off, int len) throws IOException {
-        checkClosed();
-
-        if (eof())
-            return -1;
-
-        readStart();
-
-        try {
-            long remaining = limit - pos;
-
-            int read = buf.flatten(b, pos, off, len);
-
-            pos += read;
-            total += read;
-            remaining -= read;
-
-            if (remaining > 0 && read != len) {
-                int readAmt = (int)Math.min(remaining, len - read);
-
-                delegate.hadoop().readData(delegate, pos, readAmt, b, off + read, len - read).get();
-
-                read += readAmt;
-                pos += readAmt;
-                total += readAmt;
-            }
-
-            buf.refreshAhead(pos);
-
-            return read;
-        }
-        catch (IgniteCheckedException e) {
-            throw HadoopIgfsUtils.cast(e);
-        }
-        finally {
-            readEnd();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized long skip(long n) throws IOException {
-        checkClosed();
-
-        if (clientLog.isLogEnabled())
-            clientLog.logSkip(logStreamId, n);
-
-        long oldPos = pos;
-
-        if (pos + n <= limit)
-            pos += n;
-        else
-            pos = limit;
-
-        buf.refreshAhead(pos);
-
-        return pos - oldPos;
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized int available() throws IOException {
-        checkClosed();
-
-        int available = buf.available(pos);
-
-        assert available >= 0;
-
-        return available;
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized void close() throws IOException {
-        if (!closed) {
-            readStart();
-
-            if (log.isDebugEnabled())
-                log.debug("Closing input stream: " + delegate);
-
-            delegate.hadoop().closeStream(delegate);
-
-            readEnd();
-
-            if (clientLog.isLogEnabled())
-                clientLog.logCloseIn(logStreamId, userTime, readTime, total);
-
-            markClosed(false);
-
-            if (log.isDebugEnabled())
-                log.debug("Closed stream [delegate=" + delegate + ", readTime=" + readTime +
-                    ", userTime=" + userTime + ']');
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized void mark(int readLimit) {
-        markPos = pos;
-
-        if (clientLog.isLogEnabled())
-            clientLog.logMark(logStreamId, readLimit);
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized void reset() throws IOException {
-        checkClosed();
-
-        if (clientLog.isLogEnabled())
-            clientLog.logReset(logStreamId);
-
-        if (markPos == -1)
-            throw new IOException("Stream was not marked.");
-
-        pos = markPos;
-
-        buf.refreshAhead(pos);
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean markSupported() {
-        return true;
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized int read(long position, byte[] buf, int off, int len) throws IOException {
-        long remaining = limit - position;
-
-        int read = (int)Math.min(len, remaining);
-
-        // Return -1 at EOF.
-        if (read == 0)
-            return -1;
-
-        readFully(position, buf, off, read);
-
-        return read;
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized void readFully(long position, byte[] buf, int off, int len) throws IOException {
-        long remaining = limit - position;
-
-        checkClosed();
-
-        if (len > remaining)
-            throw new EOFException("End of stream reached before data was fully read.");
-
-        readStart();
-
-        try {
-            int read = this.buf.flatten(buf, position, off, len);
-
-            total += read;
-
-            if (read != len) {
-                int readAmt = len - read;
-
-                delegate.hadoop().readData(delegate, position + read, readAmt, buf, off + read, readAmt).get();
-
-                total += readAmt;
-            }
-
-            if (clientLog.isLogEnabled())
-                clientLog.logRandomRead(logStreamId, position, len);
-        }
-        catch (IgniteCheckedException e) {
-            throw HadoopIgfsUtils.cast(e);
-        }
-        finally {
-            readEnd();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readFully(long position, byte[] buf) throws IOException {
-        readFully(position, buf, 0, buf.length);
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized void seek(long pos) throws IOException {
-        A.ensure(pos >= 0, "position must be non-negative");
-
-        checkClosed();
-
-        if (clientLog.isLogEnabled())
-            clientLog.logSeek(logStreamId, pos);
-
-        if (pos > limit)
-            pos = limit;
-
-        if (log.isDebugEnabled())
-            log.debug("Seek to position [delegate=" + delegate + ", pos=" + pos + ", oldPos=" + this.pos + ']');
-
-        this.pos = pos;
-
-        buf.refreshAhead(pos);
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized long getPos() {
-        return pos;
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized boolean seekToNewSource(long targetPos) {
-        return false;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onClose() {
-        markClosed(true);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onError(String errMsg) {
-        // No-op.
-    }
-
-    /**
-     * Marks stream as closed.
-     *
-     * @param connBroken {@code True} if connection with server was lost.
-     */
-    private void markClosed(boolean connBroken) {
-        // It is OK to have a race here.
-        if (!closed) {
-            closed = true;
-
-            this.connBroken = connBroken;
-
-            delegate.hadoop().removeEventListener(delegate);
-        }
-    }
-
-    /**
-     * @throws IOException If check failed.
-     */
-    private void checkClosed() throws IOException {
-        if (closed) {
-            if (connBroken)
-                throw new IOException("Server connection was lost.");
-            else
-                throw new IOException("Stream is closed.");
-        }
-    }
-
-    /**
-     * @return {@code True} if end of stream reached.
-     */
-    private boolean eof() {
-        return limit == pos;
-    }
-
-    /**
-     * Asynchronous prefetch buffer.
-     */
-    private static class FetchBufferPart {
-        /** Read future. */
-        private IgniteInternalFuture<byte[]> readFut;
-
-        /** Position of cached chunk in file. */
-        private long pos;
-
-        /** Prefetch length. Stored separately because the read future result may not be available yet. */
-        private int len;
-
-        /**
-         * Creates fetch buffer part.
-         *
-         * @param readFut Read future for this buffer.
-         * @param pos Read position.
-         * @param len Chunk length.
-         */
-        private FetchBufferPart(IgniteInternalFuture<byte[]> readFut, long pos, int len) {
-            this.readFut = readFut;
-            this.pos = pos;
-            this.len = len;
-        }
-
-        /**
-         * Copies cached data if specified position matches cached region.
-         *
-         * @param dst Destination buffer.
-         * @param pos Read position in file.
-         * @param dstOff Offset in destination buffer at which to start writing.
-         * @param len Maximum number of bytes to copy.
-         * @return Number of bytes copied.
-         * @throws IgniteCheckedException If read future failed.
-         */
-        public int flatten(byte[] dst, long pos, int dstOff, int len) throws IgniteCheckedException {
-            // If read start position is within cached boundaries.
-            if (contains(pos)) {
-                byte[] data = readFut.get();
-
-                int srcPos = (int)(pos - this.pos);
-                int cpLen = Math.min(len, data.length - srcPos);
-
-                U.arrayCopy(data, srcPos, dst, dstOff, cpLen);
-
-                return cpLen;
-            }
-
-            return 0;
-        }
-
-        /**
-         * @return {@code True} if data is ready to be read.
-         */
-        public boolean ready() {
-            return readFut.isDone();
-        }
-
-        /**
-         * Checks if current buffer part contains given position.
-         *
-         * @param pos Position to check.
-         * @return {@code True} if position matches buffer region.
-         */
-        public boolean contains(long pos) {
-            return this.pos <= pos && this.pos + len > pos;
-        }
-    }
-
-    /**
-     * Double buffer that keeps two consecutive chunks prefetched ahead of the read position.
-     */
-    private class DoubleFetchBuffer {
-        /** First (currently read) buffer part. */
-        private FetchBufferPart first;
-
-        /** Second (prefetched ahead) buffer part. */
-        private FetchBufferPart second;
-
-        /**
-         * Copies fetched data from both buffers to the destination array if a cached region matches
-         * the read position.
-         *
-         * @param dst Destination buffer.
-         * @param pos Read position in file.
-         * @param dstOff Destination buffer offset.
-         * @param len Maximum number of bytes to copy.
-         * @return Number of bytes copied.
-         * @throws IgniteCheckedException If any read operation failed.
-         */
-        public int flatten(byte[] dst, long pos, int dstOff, int len) throws IgniteCheckedException {
-            assert dstOff >= 0;
-            assert dstOff + len <= dst.length : "Invalid indices [dst.length=" + dst.length + ", dstOff=" + dstOff +
-                ", len=" + len + ']';
-
-            int bytesCopied = 0;
-
-            if (first != null) {
-                bytesCopied += first.flatten(dst, pos, dstOff, len);
-
-                if (bytesCopied != len && second != null) {
-                    assert second.pos == first.pos + first.len;
-
-                    bytesCopied += second.flatten(dst, pos + bytesCopied, dstOff + bytesCopied, len - bytesCopied);
-                }
-            }
-
-            return bytesCopied;
-        }
-
-        /**
-         * Gets byte at specified position in buffer.
-         *
-         * @param pos Stream position.
-         * @return Read byte.
-         * @throws IgniteCheckedException If read failed.
-         */
-        public int atPosition(long pos) throws IgniteCheckedException {
-            // Should not reach here if stream contains no data.
-            assert first != null;
-
-            if (first.contains(pos)) {
-                byte[] bytes = first.readFut.get();
-
-                return bytes[((int)(pos - first.pos))] & 0xFF;
-            }
-            else {
-                assert second != null;
-                assert second.contains(pos);
-
-                byte[] bytes = second.readFut.get();
-
-                return bytes[((int)(pos - second.pos))] & 0xFF;
-            }
-        }
-
-        /**
-         * Starts asynchronous buffer refresh if needed, depending on current position.
-         *
-         * @param pos Current stream position.
-         */
-        public void refreshAhead(long pos) {
-            if (fullPrefetch(pos)) {
-                first = fetch(pos, bufHalfSize);
-                second = fetch(pos + bufHalfSize, bufHalfSize);
-            }
-            else if (needFlip(pos)) {
-                first = second;
-
-                second = fetch(first.pos + first.len, bufHalfSize);
-            }
-        }
-
-        /**
-         * @param pos Position from which read is expected.
-         * @return Number of bytes available to be read without blocking.
-         */
-        public int available(long pos) {
-            int available = 0;
-
-            if (first != null) {
-                if (first.contains(pos)) {
-                    if (first.ready()) {
-                        // Bytes remaining in the first part from the current position.
-                        available += (first.pos + first.len - pos);
-
-                        if (second != null && second.ready())
-                            available += second.len;
-                    }
-                }
-                else {
-                    if (second != null && second.contains(pos) && second.ready())
-                        // Bytes remaining in the second part from the current position.
-                        available += (second.pos + second.len - pos);
-                }
-            }
-
-            return available;
-        }
-
-        /**
-         * Checks if position shifted enough to forget previous buffer.
-         *
-         * @param pos Current position.
-         * @return {@code True} if need flip buffers.
-         */
-        private boolean needFlip(long pos) {
-            // Flip as soon as the read position enters the second buffer.
-            return second != null && second.contains(pos);
-        }
-
-        /**
-         * Determines if all cached bytes should be discarded and new region should be
-         * prefetched.
-         *
-         * @param curPos Current stream position.
-         * @return {@code True} if need to refresh both blocks.
-         */
-        private boolean fullPrefetch(long curPos) {
-            // If no data was prefetched yet, return true.
-            return first == null || curPos < first.pos || (second != null && curPos >= second.pos + second.len);
-        }
-
-        /**
-         * Starts asynchronous fetch for given region.
-         *
-         * @param pos Position to read from.
-         * @param size Number of bytes to read.
-         * @return Fetch buffer part.
-         */
-        private FetchBufferPart fetch(long pos, int size) {
-            long remaining = limit - pos;
-
-            size = (int)Math.min(size, remaining);
-
-            return size <= 0 ? null :
-                new FetchBufferPart(delegate.hadoop().readData(delegate, pos, size, null, 0, 0), pos, size);
-        }
-    }
-}
\ No newline at end of file

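Since the stream above implements Seekable and PositionedReadable, Hadoop client code can drive it like any other file system input stream: positioned reads leave the cursor alone, while sequential reads are served through the double prefetch buffer. A usage sketch, assuming an already-opened HadoopIgfsInputStream named in:

    byte[] header = new byte[16];

    in.readFully(0, header); // Positioned read: does not move the stream cursor.

    in.seek(1024);           // Move the cursor; triggers a prefetch refresh.
    int b = in.read();       // Sequential read served from the prefetch buffer.

    assert in.getPos() == 1025;
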

[39/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopTcpNioCommunicationClient.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopTcpNioCommunicationClient.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopTcpNioCommunicationClient.java
new file mode 100644
index 0000000..17c2ff5
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopTcpNioCommunicationClient.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;
+import org.apache.ignite.internal.util.nio.GridNioFuture;
+import org.apache.ignite.internal.util.nio.GridNioSession;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+/**
+ * Grid client for NIO server.
+ */
+public class HadoopTcpNioCommunicationClient extends HadoopAbstractCommunicationClient {
+    /** NIO session. */
+    private final GridNioSession ses;
+
+    /**
+     * Constructor for test purposes only.
+     */
+    public HadoopTcpNioCommunicationClient() {
+        ses = null;
+    }
+
+    /**
+     * @param ses Session.
+     */
+    public HadoopTcpNioCommunicationClient(GridNioSession ses) {
+        assert ses != null;
+
+        this.ses = ses;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean close() {
+        boolean res = super.close();
+
+        if (res)
+            ses.close();
+
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void forceClose() {
+        super.forceClose();
+
+        ses.close();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void sendMessage(HadoopProcessDescriptor desc, HadoopMessage msg)
+        throws IgniteCheckedException {
+        if (closed())
+            throw new IgniteCheckedException("Client was closed: " + this);
+
+        GridNioFuture<?> fut = ses.send(msg);
+
+        if (fut.isDone())
+            fut.get();
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getIdleTime() {
+        long now = U.currentTimeMillis();
+
+        // Session can be used for receiving and sending.
+        return Math.min(Math.min(now - ses.lastReceiveTime(), now - ses.lastSendScheduleTime()),
+            now - ses.lastSendTime());
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopTcpNioCommunicationClient.class, this, super.toString());
+    }
+}
\ No newline at end of file

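A usage sketch for the client above; the GridNioSession (ses), process descriptor (desc) and message (msg) are assumed to come from an already-established connection, and the 30-second idle threshold is illustrative:

    HadoopTcpNioCommunicationClient client = new HadoopTcpNioCommunicationClient(ses);

    // Throws only if the send has already completed with an error;
    // otherwise the message is queued on the NIO session.
    client.sendMessage(desc, msg);

    if (client.getIdleTime() > 30_000)
        client.close(); // Gracefully closes the underlying session.
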
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1CleanupTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1CleanupTask.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1CleanupTask.java
new file mode 100644
index 0000000..750b314
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1CleanupTask.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v1;
+
+import java.io.IOException;
+import org.apache.hadoop.mapred.JobContext;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.OutputCommitter;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2TaskContext;
+
+/**
+ * Hadoop cleanup task implementation for v1 API.
+ */
+public class HadoopV1CleanupTask extends HadoopV1Task {
+    /** Abort flag. */
+    private final boolean abort;
+
+    /**
+     * @param taskInfo Task info.
+     * @param abort Abort flag.
+     */
+    public HadoopV1CleanupTask(HadoopTaskInfo taskInfo, boolean abort) {
+        super(taskInfo);
+
+        this.abort = abort;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
+        HadoopV2TaskContext ctx = (HadoopV2TaskContext)taskCtx;
+
+        JobContext jobCtx = ctx.jobContext();
+
+        try {
+            OutputCommitter committer = jobCtx.getJobConf().getOutputCommitter();
+
+            if (abort)
+                committer.abortJob(jobCtx, JobStatus.State.FAILED);
+            else
+                committer.commitJob(jobCtx);
+        }
+        catch (IOException e) {
+            throw new IgniteCheckedException(e);
+        }
+    }
+}
\ No newline at end of file

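The abort flag above selects between the two OutputCommitter outcomes. A minimal sketch, with the task info value assumed to exist:

    // Commit path: run(...) calls committer.commitJob(jobCtx).
    HadoopV1CleanupTask commit = new HadoopV1CleanupTask(taskInfo, false);

    // Abort path: run(...) calls committer.abortJob(jobCtx, JobStatus.State.FAILED).
    HadoopV1CleanupTask abort = new HadoopV1CleanupTask(taskInfo, true);
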
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Counter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Counter.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Counter.java
new file mode 100644
index 0000000..c623eab
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Counter.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v1;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import org.apache.hadoop.mapred.Counters;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopLongCounter;
+import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2Counter;
+
+import static org.apache.hadoop.mapreduce.util.CountersStrings.toEscapedCompactString;
+
+/**
+ * Hadoop counter implementation for v1 API.
+ */
+public class HadoopV1Counter extends Counters.Counter {
+    /** Delegate. */
+    private final HadoopLongCounter cntr;
+
+    /**
+     * Creates new instance.
+     *
+     * @param cntr Delegate counter.
+     */
+    public HadoopV1Counter(HadoopLongCounter cntr) {
+        this.cntr = cntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setDisplayName(String displayName) {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public String getName() {
+        return cntr.name();
+    }
+
+    /** {@inheritDoc} */
+    @Override public String getDisplayName() {
+        return getName();
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getValue() {
+        return cntr.value();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setValue(long val) {
+        cntr.value(val);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void increment(long incr) {
+        cntr.increment(incr);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void write(DataOutput out) throws IOException {
+        throw new UnsupportedOperationException("not implemented");
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readFields(DataInput in) throws IOException {
+        throw new UnsupportedOperationException("not implemented");
+    }
+
+    /** {@inheritDoc} */
+    @Override public String makeEscapedCompactString() {
+        return toEscapedCompactString(new HadoopV2Counter(cntr));
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("deprecation")
+    @Override public boolean contentEquals(Counters.Counter cntr) {
+        return getUnderlyingCounter().equals(cntr.getUnderlyingCounter());
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getCounter() {
+        return cntr.value();
+    }
+
+    /** {@inheritDoc} */
+    @Override public Counter getUnderlyingCounter() {
+        return this;
+    }
+}
\ No newline at end of file

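The class above is a thin v1 adapter over the Ignite-internal HadoopLongCounter: every read and write passes straight through to the delegate. A minimal sketch, assuming longCntr was obtained from the task's counter registry:

    HadoopV1Counter cntr = new HadoopV1Counter(longCntr);

    cntr.increment(5);              // Delegates to longCntr.increment(5).
    cntr.setValue(42);              // Delegates to longCntr.value(42).

    assert cntr.getValue() == 42;   // Reads back through the same delegate.
    assert cntr.getCounter() == 42; // v1-style alias for getValue().
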
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1MapTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1MapTask.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1MapTask.java
new file mode 100644
index 0000000..fb2266a
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1MapTask.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v1;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.FileSplit;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.Mapper;
+import org.apache.hadoop.mapred.RecordReader;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopFileBlock;
+import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
+import org.apache.ignite.internal.processors.hadoop.HadoopJob;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2TaskContext;
+
+/**
+ * Hadoop map task implementation for v1 API.
+ */
+public class HadoopV1MapTask extends HadoopV1Task {
+    /** */
+    private static final String[] EMPTY_HOSTS = new String[0];
+
+    /**
+     * Constructor.
+     *
+     * @param taskInfo Task info.
+     */
+    public HadoopV1MapTask(HadoopTaskInfo taskInfo) {
+        super(taskInfo);
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("unchecked")
+    @Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
+        HadoopJob job = taskCtx.job();
+
+        HadoopV2TaskContext ctx = (HadoopV2TaskContext)taskCtx;
+
+        JobConf jobConf = ctx.jobConf();
+
+        InputFormat inFormat = jobConf.getInputFormat();
+
+        HadoopInputSplit split = info().inputSplit();
+
+        InputSplit nativeSplit;
+
+        if (split instanceof HadoopFileBlock) {
+            HadoopFileBlock block = (HadoopFileBlock)split;
+
+            nativeSplit = new FileSplit(new Path(block.file().toString()), block.start(), block.length(), EMPTY_HOSTS);
+        }
+        else
+            nativeSplit = (InputSplit)ctx.getNativeSplit(split);
+
+        assert nativeSplit != null;
+
+        Reporter reporter = new HadoopV1Reporter(taskCtx);
+
+        HadoopV1OutputCollector collector = null;
+
+        try {
+            collector = collector(jobConf, ctx, !job.info().hasCombiner() && !job.info().hasReducer(),
+                fileName(), ctx.attemptId());
+
+            RecordReader reader = inFormat.getRecordReader(nativeSplit, jobConf, reporter);
+
+            Mapper mapper = ReflectionUtils.newInstance(jobConf.getMapperClass(), jobConf);
+
+            Object key = reader.createKey();
+            Object val = reader.createValue();
+
+            assert mapper != null;
+
+            try {
+                try {
+                    while (reader.next(key, val)) {
+                        if (isCancelled())
+                            throw new HadoopTaskCancelledException("Map task cancelled.");
+
+                        mapper.map(key, val, collector, reporter);
+                    }
+                }
+                finally {
+                    mapper.close();
+                }
+            }
+            finally {
+                collector.closeWriter();
+            }
+
+            collector.commit();
+        }
+        catch (Exception e) {
+            if (collector != null)
+                collector.abort();
+
+            throw new IgniteCheckedException(e);
+        }
+    }
+}
\ No newline at end of file

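The split handling above has two cases: Ignite-planned HadoopFileBlock splits are rewrapped as plain FileSplit instances with no host hints, while anything else is resolved through the task context. A condensed sketch of the first branch, with block assumed to cover bytes [start, start + length) of a file:

    InputSplit nativeSplit = new FileSplit(
        new Path(block.file().toString()), // File URI rendered as a Hadoop Path.
        block.start(),                     // Offset of the first byte of the split.
        block.length(),                    // Number of bytes in the split.
        new String[0]);                    // No locality hints (EMPTY_HOSTS above).
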
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1OutputCollector.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1OutputCollector.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1OutputCollector.java
new file mode 100644
index 0000000..37f81a6
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1OutputCollector.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v1;
+
+import java.io.IOException;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.OutputCollector;
+import org.apache.hadoop.mapred.OutputCommitter;
+import org.apache.hadoop.mapred.OutputFormat;
+import org.apache.hadoop.mapred.RecordWriter;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.mapred.TaskAttemptContext;
+import org.apache.hadoop.mapred.TaskAttemptContextImpl;
+import org.apache.hadoop.mapred.TaskAttemptID;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Hadoop output collector.
+ */
+public class HadoopV1OutputCollector implements OutputCollector {
+    /** Job configuration. */
+    private final JobConf jobConf;
+
+    /** Task context. */
+    private final HadoopTaskContext taskCtx;
+
+    /** Optional direct writer. */
+    private final RecordWriter writer;
+
+    /** Task attempt. */
+    private final TaskAttemptID attempt;
+
+    /**
+     * @param jobConf Job configuration.
+     * @param taskCtx Task context.
+     * @param directWrite Direct write flag.
+     * @param fileName File name.
+     * @throws IOException In case of IO exception.
+     */
+    HadoopV1OutputCollector(JobConf jobConf, HadoopTaskContext taskCtx, boolean directWrite,
+        @Nullable String fileName, TaskAttemptID attempt) throws IOException {
+        this.jobConf = jobConf;
+        this.taskCtx = taskCtx;
+        this.attempt = attempt;
+
+        if (directWrite) {
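+            // Propagate the attempt ID so the output format and committer see a proper task attempt.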
+            jobConf.set("mapreduce.task.attempt.id", attempt.toString());
+
+            OutputFormat outFormat = jobConf.getOutputFormat();
+
+            writer = outFormat.getRecordWriter(null, jobConf, fileName, Reporter.NULL);
+        }
+        else
+            writer = null;
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("unchecked")
+    @Override public void collect(Object key, Object val) throws IOException {
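+        // A direct writer bypasses the Ignite shuffle; otherwise the pair goes to the task context output.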
+        if (writer != null)
+            writer.write(key, val);
+        else {
+            try {
+                taskCtx.output().write(key, val);
+            }
+            catch (IgniteCheckedException e) {
+                throw new IOException(e);
+            }
+        }
+    }
+
+    /**
+     * Close writer.
+     *
+     * @throws IOException In case of IO exception.
+     */
+    public void closeWriter() throws IOException {
+        if (writer != null)
+            writer.close(Reporter.NULL);
+    }
+
+    /**
+     * Setup task.
+     *
+     * @throws IOException If failed.
+     */
+    public void setup() throws IOException {
+        if (writer != null)
+            jobConf.getOutputCommitter().setupTask(new TaskAttemptContextImpl(jobConf, attempt));
+    }
+
+    /**
+     * Commit task.
+     *
+     * @throws IOException If failed.
+     */
+    public void commit() throws IOException {
+        if (writer != null) {
+            OutputCommitter outputCommitter = jobConf.getOutputCommitter();
+
+            TaskAttemptContext taskCtx = new TaskAttemptContextImpl(jobConf, attempt);
+
+            if (outputCommitter.needsTaskCommit(taskCtx))
+                outputCommitter.commitTask(taskCtx);
+        }
+    }
+
+    /**
+     * Abort task.
+     */
+    public void abort() {
+        try {
+            if (writer != null)
+                jobConf.getOutputCommitter().abortTask(new TaskAttemptContextImpl(jobConf, attempt));
+        }
+        catch (IOException ignore) {
+            // No-op.
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Partitioner.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Partitioner.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Partitioner.java
new file mode 100644
index 0000000..0ab1bba
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Partitioner.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v1;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.Partitioner;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.ignite.internal.processors.hadoop.HadoopPartitioner;
+
+/**
+ * Hadoop partitioner adapter for v1 API.
+ */
+public class HadoopV1Partitioner implements HadoopPartitioner {
+    /** Partitioner instance. */
+    private Partitioner<Object, Object> part;
+
+    /**
+     * @param cls Hadoop partitioner class.
+     * @param conf Job configuration.
+     */
+    public HadoopV1Partitioner(Class<? extends Partitioner> cls, Configuration conf) {
+        part = (Partitioner<Object, Object>) ReflectionUtils.newInstance(cls, conf);
+    }
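+
+    // A hypothetical usage sketch (HashPartitioner is one standard Hadoop partitioner):
+    //   HadoopPartitioner p = new HadoopV1Partitioner(HashPartitioner.class, jobConf);
+    //   int idx = p.partition(key, val, reducerCnt);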
+
+    /** {@inheritDoc} */
+    @Override public int partition(Object key, Object val, int parts) {
+        return part.getPartition(key, val, parts);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1ReduceTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1ReduceTask.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1ReduceTask.java
new file mode 100644
index 0000000..e656695
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1ReduceTask.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v1;
+
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.Reducer;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopJob;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
+import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2TaskContext;
+
+/**
+ * Hadoop reduce task implementation for v1 API.
+ */
+public class HadoopV1ReduceTask extends HadoopV1Task {
+    /** {@code True} if reduce, {@code false} if combine. */
+    private final boolean reduce;
+
+    /**
+     * Constructor.
+     *
+     * @param taskInfo Task info.
+     * @param reduce {@code True} if reduce, {@code false} if combine.
+     */
+    public HadoopV1ReduceTask(HadoopTaskInfo taskInfo, boolean reduce) {
+        super(taskInfo);
+
+        this.reduce = reduce;
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("unchecked")
+    @Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
+        HadoopJob job = taskCtx.job();
+
+        HadoopV2TaskContext ctx = (HadoopV2TaskContext)taskCtx;
+
+        JobConf jobConf = ctx.jobConf();
+
+        HadoopTaskInput input = taskCtx.input();
+
+        HadoopV1OutputCollector collector = null;
+
+        try {
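+            // Direct write is used for the reduce phase, or for a combiner when no reducer is
+            // configured (in that case the combiner output is final).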
+            collector = collector(jobConf, ctx, reduce || !job.info().hasReducer(), fileName(), ctx.attemptId());
+
+            Reducer reducer = reduce ? ReflectionUtils.newInstance(jobConf.getReducerClass(), jobConf) :
+                ReflectionUtils.newInstance(jobConf.getCombinerClass(), jobConf);
+
+            assert reducer != null;
+
+            try {
+                try {
+                    while (input.next()) {
+                        if (isCancelled())
+                            throw new HadoopTaskCancelledException("Reduce task cancelled.");
+
+                        reducer.reduce(input.key(), input.values(), collector, Reporter.NULL);
+                    }
+                }
+                finally {
+                    reducer.close();
+                }
+            }
+            finally {
+                collector.closeWriter();
+            }
+
+            collector.commit();
+        }
+        catch (Exception e) {
+            if (collector != null)
+                collector.abort();
+
+            throw new IgniteCheckedException(e);
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Reporter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Reporter.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Reporter.java
new file mode 100644
index 0000000..5a63aab
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Reporter.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v1;
+
+import org.apache.hadoop.mapred.Counters;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopLongCounter;
+
+/**
+ * Hadoop reporter implementation for v1 API.
+ */
+public class HadoopV1Reporter implements Reporter {
+    /** Context. */
+    private final HadoopTaskContext ctx;
+
+    /**
+     * Creates new instance.
+     *
+     * @param ctx Context.
+     */
+    public HadoopV1Reporter(HadoopTaskContext ctx) {
+        this.ctx = ctx;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setStatus(String status) {
+        // TODO
+    }
+
+    /** {@inheritDoc} */
+    @Override public Counters.Counter getCounter(Enum<?> name) {
+        return getCounter(name.getDeclaringClass().getName(), name.name());
+    }
+
+    /** {@inheritDoc} */
+    @Override public Counters.Counter getCounter(String grp, String name) {
+        return new HadoopV1Counter(ctx.counter(grp, name, HadoopLongCounter.class));
+    }
+
+    /** {@inheritDoc} */
+    @Override public void incrCounter(Enum<?> key, long amount) {
+        getCounter(key).increment(amount);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void incrCounter(String grp, String cntr, long amount) {
+        getCounter(grp, cntr).increment(amount);
+    }
+
+    /** {@inheritDoc} */
+    @Override public InputSplit getInputSplit() throws UnsupportedOperationException {
+        throw new UnsupportedOperationException("Reporter has no input."); // TODO
+    }
+
+    /** {@inheritDoc} */
+    @Override public float getProgress() {
+        return 0.5f; // TODO
+    }
+
+    /** {@inheritDoc} */
+    @Override public void progress() {
+        // TODO
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1SetupTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1SetupTask.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1SetupTask.java
new file mode 100644
index 0000000..d2f6823
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1SetupTask.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v1;
+
+import java.io.IOException;
+import org.apache.hadoop.mapred.OutputCommitter;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2TaskContext;
+
+/**
+ * Hadoop setup task implementation for v1 API.
+ */
+public class HadoopV1SetupTask extends HadoopV1Task {
+    /**
+     * Constructor.
+     *
+     * @param taskInfo Task info.
+     */
+    public HadoopV1SetupTask(HadoopTaskInfo taskInfo) {
+        super(taskInfo);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
+        HadoopV2TaskContext ctx = (HadoopV2TaskContext)taskCtx;
+
+        try {
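+            // The FileSystem argument is ignored by standard output formats (e.g. FileOutputFormat), hence null.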
+            ctx.jobConf().getOutputFormat().checkOutputSpecs(null, ctx.jobConf());
+
+            OutputCommitter committer = ctx.jobConf().getOutputCommitter();
+
+            if (committer != null)
+                committer.setupJob(ctx.jobContext());
+        }
+        catch (IOException e) {
+            throw new IgniteCheckedException(e);
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Splitter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Splitter.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Splitter.java
new file mode 100644
index 0000000..203def4
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Splitter.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v1;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.mapred.FileSplit;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopFileBlock;
+import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
+import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Hadoop API v1 splitter.
+ */
+public class HadoopV1Splitter {
+    /** */
+    private static final String[] EMPTY_HOSTS = {};
+
+    /**
+     * @param jobConf Job configuration.
+     * @return Collection of mapped splits.
+     * @throws IgniteCheckedException If mapping failed.
+     */
+    public static Collection<HadoopInputSplit> splitJob(JobConf jobConf) throws IgniteCheckedException {
+        try {
+            InputFormat<?, ?> format = jobConf.getInputFormat();
+
+            assert format != null;
+
+            InputSplit[] splits = format.getSplits(jobConf, 0);
+
+            Collection<HadoopInputSplit> res = new ArrayList<>(splits.length);
+
+            for (int i = 0; i < splits.length; i++) {
+                InputSplit nativeSplit = splits[i];
+
+                if (nativeSplit instanceof FileSplit) {
+                    FileSplit s = (FileSplit)nativeSplit;
+
+                    res.add(new HadoopFileBlock(s.getLocations(), s.getPath().toUri(), s.getStart(), s.getLength()));
+                }
+                else
+                    res.add(HadoopUtils.wrapSplit(i, nativeSplit, nativeSplit.getLocations()));
+            }
+
+            return res;
+        }
+        catch (IOException e) {
+            throw new IgniteCheckedException(e);
+        }
+    }
+
+    /**
+     * @param clsName Input split class name.
+     * @param in Input stream.
+     * @param hosts Optional hosts.
+     * @return File block or {@code null} if it is not a {@link FileSplit} instance.
+     * @throws IgniteCheckedException If failed.
+     */
+    @Nullable public static HadoopFileBlock readFileBlock(String clsName, FSDataInputStream in,
+        @Nullable String[] hosts) throws IgniteCheckedException {
+        if (!FileSplit.class.getName().equals(clsName))
+            return null;
+
+        FileSplit split = U.newInstance(FileSplit.class);
+
+        try {
+            split.readFields(in);
+        }
+        catch (IOException e) {
+            throw new IgniteCheckedException(e);
+        }
+
+        if (hosts == null)
+            hosts = EMPTY_HOSTS;
+
+        return new HadoopFileBlock(hosts, split.getPath().toUri(), split.getStart(), split.getLength());
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Task.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Task.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Task.java
new file mode 100644
index 0000000..a89323c
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Task.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v1;
+
+import java.io.IOException;
+import java.text.NumberFormat;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.TaskAttemptID;
+import org.apache.ignite.internal.processors.hadoop.HadoopTask;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2TaskContext;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Extended Hadoop v1 task.
+ */
+public abstract class HadoopV1Task extends HadoopTask {
+    /** Indicates that this task is to be cancelled. */
+    private volatile boolean cancelled;
+
+    /**
+     * Constructor.
+     *
+     * @param taskInfo Task info.
+     */
+    protected HadoopV1Task(HadoopTaskInfo taskInfo) {
+        super(taskInfo);
+    }
+
+    /**
+     * Gets the file name for this task's result.
+     *
+     * @return File name.
+     */
+    public String fileName() {
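+        // Mimics the standard Hadoop output naming scheme: "part-00000", "part-00001", ...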
+        NumberFormat numFormat = NumberFormat.getInstance();
+
+        numFormat.setMinimumIntegerDigits(5);
+        numFormat.setGroupingUsed(false);
+
+        return "part-" + numFormat.format(info().taskNumber());
+    }
+
+    /**
+     * Creates and sets up an output collector.
+     *
+     * @param jobConf Job configuration.
+     * @param taskCtx Task context.
+     * @param directWrite Direct write flag.
+     * @param fileName File name.
+     * @param attempt Attempt of task.
+     * @return Collector.
+     * @throws IOException In case of IO exception.
+     */
+    protected HadoopV1OutputCollector collector(JobConf jobConf, HadoopV2TaskContext taskCtx,
+        boolean directWrite, @Nullable String fileName, TaskAttemptID attempt) throws IOException {
+        HadoopV1OutputCollector collector = new HadoopV1OutputCollector(jobConf, taskCtx, directWrite,
+            fileName, attempt) {
+            /** {@inheritDoc} */
+            @Override public void collect(Object key, Object val) throws IOException {
+                if (cancelled)
+                    throw new HadoopTaskCancelledException("Task cancelled.");
+
+                super.collect(key, val);
+            }
+        };
+
+        collector.setup();
+
+        return collector;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void cancel() {
+        cancelled = true;
+    }
+
+    /**
+     * @return {@code True} if the task is cancelled.
+     */
+    public boolean isCancelled() {
+        return cancelled;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopDaemon.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopDaemon.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopDaemon.java
new file mode 100644
index 0000000..9632525
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopDaemon.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v2;
+
+import java.util.Collection;
+import java.util.LinkedList;
+
+/**
+ * Replacement for Hadoop {@code org.apache.hadoop.util.Daemon} class.
+ */
+@SuppressWarnings("UnusedDeclaration")
+public class HadoopDaemon extends Thread {
+    /** Lock object used for synchronization. */
+    private static final Object lock = new Object();
+
+    /** Collection to hold the threads to be stopped. */
+    private static Collection<HadoopDaemon> daemons = new LinkedList<>();
+
+    {
+        setDaemon(true); // always a daemon
+    }
+
+    /** Runnable of this thread, may be this. */
+    final Runnable runnable;
+
+    /**
+     * Construct a daemon thread.
+     */
+    public HadoopDaemon() {
+        super();
+
+        runnable = this;
+
+        enqueueIfNeeded();
+    }
+
+    /**
+     * Construct a daemon thread.
+     */
+    public HadoopDaemon(Runnable runnable) {
+        super(runnable);
+
+        this.runnable = runnable;
+
+        this.setName(runnable.toString());
+
+        enqueueIfNeeded();
+    }
+
+    /**
+     * Construct a daemon thread to be part of a specified thread group.
+     */
+    public HadoopDaemon(ThreadGroup grp, Runnable runnable) {
+        super(grp, runnable);
+
+        this.runnable = runnable;
+
+        this.setName(runnable.toString());
+
+        enqueueIfNeeded();
+    }
+
+    /**
+     * Getter for the runnable. May return this.
+     *
+     * @return the runnable
+     */
+    public Runnable getRunnable() {
+        return runnable;
+    }
+
+    /**
+     * Checks whether the runnable is a Hadoop {@code org.apache.hadoop.hdfs.PeerCache} runnable.
+     *
+     * @param r The runnable.
+     * @return {@code True} if it is.
+     */
+    private static boolean isPeerCacheRunnable(Runnable r) {
+        String name = r.getClass().getName();
+
+        return name.startsWith("org.apache.hadoop.hdfs.PeerCache");
+    }
+
+    /**
+     * Enqueue this thread if it should be stopped upon the task end.
+     */
+    private void enqueueIfNeeded() {
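+        // Track only PeerCache daemons created by this class loader so they can be interrupted
+        // once the job class loader is disposed.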
+        synchronized (lock) {
+            if (daemons == null)
+                throw new RuntimeException("Failed to create HadoopDaemon (its registry is already cleared): " +
+                    "[classLoader=" + getClass().getClassLoader() + ']');
+
+            if (runnable.getClass().getClassLoader() == getClass().getClassLoader() && isPeerCacheRunnable(runnable))
+                daemons.add(this);
+        }
+    }
+
+    /**
+     * Stops all the registered threads.
+     */
+    public static void dequeueAndStopAll() {
+        synchronized (lock) {
+            if (daemons != null) {
+                for (HadoopDaemon daemon : daemons)
+                    daemon.interrupt();
+
+                daemons = null;
+            }
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopExternalSplit.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopExternalSplit.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopExternalSplit.java
new file mode 100644
index 0000000..c7e8a0a
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopExternalSplit.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v2;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
+
+/**
+ * Split serialized in external file.
+ */
+public class HadoopExternalSplit extends HadoopInputSplit {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private long off;
+
+    /**
+     * For {@link Externalizable}.
+     */
+    public HadoopExternalSplit() {
+        // No-op.
+    }
+
+    /**
+     * @param hosts Hosts.
+     * @param off Offset of this split in external file.
+     */
+    public HadoopExternalSplit(String[] hosts, long off) {
+        assert off >= 0 : off;
+        assert hosts != null;
+
+        this.hosts = hosts;
+        this.off = off;
+    }
+
+    /**
+     * @return Offset of this input split in external file.
+     */
+    public long offset() {
+        return off;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeExternal(ObjectOutput out) throws IOException {
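+        // Only the offset is serialized; hosts are used for task planning and need not travel with the split.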
+        out.writeLong(off);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        off = in.readLong();
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object o) {
+        if (this == o)
+            return true;
+
+        if (o == null || getClass() != o.getClass())
+            return false;
+
+        HadoopExternalSplit that = (HadoopExternalSplit) o;
+
+        return off == that.off;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        return (int)(off ^ (off >>> 32));
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSerializationWrapper.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSerializationWrapper.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSerializationWrapper.java
new file mode 100644
index 0000000..844e7f8
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSerializationWrapper.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v2;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import org.apache.hadoop.io.serializer.Deserializer;
+import org.apache.hadoop.io.serializer.Serialization;
+import org.apache.hadoop.io.serializer.Serializer;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopSerialization;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * The wrapper around external serializer.
+ */
+public class HadoopSerializationWrapper<T> implements HadoopSerialization {
+    /** External serializer - writer. */
+    private final Serializer<T> serializer;
+
+    /** External serializer - reader. */
+    private final Deserializer<T> deserializer;
+
+    /** Data output for current write operation. */
+    private OutputStream currOut;
+
+    /** Data input for current read operation. */
+    private InputStream currIn;
+
+    /** Wrapper around current output to provide OutputStream interface. */
+    private final OutputStream outStream = new OutputStream() {
+        /** {@inheritDoc} */
+        @Override public void write(int b) throws IOException {
+            currOut.write(b);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void write(byte[] b, int off, int len) throws IOException {
+            currOut.write(b, off, len);
+        }
+    };
+
+    /** Wrapper around current input to provide InputStream interface. */
+    private final InputStream inStream = new InputStream() {
+        /** {@inheritDoc} */
+        @Override public int read() throws IOException {
+            return currIn.read();
+        }
+
+        /** {@inheritDoc} */
+        @Override public int read(byte[] b, int off, int len) throws IOException {
+            return currIn.read(b, off, len);
+        }
+    };
+
+    /**
+     * @param serialization External serializer to wrap.
+     * @param cls The class to serialize.
+     */
+    public HadoopSerializationWrapper(Serialization<T> serialization, Class<T> cls) throws IgniteCheckedException {
+        assert cls != null;
+
+        serializer = serialization.getSerializer(cls);
+        deserializer = serialization.getDeserializer(cls);
+
+        try {
+            serializer.open(outStream);
+            deserializer.open(inStream);
+        }
+        catch (IOException e) {
+            throw new IgniteCheckedException(e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void write(DataOutput out, Object obj) throws IgniteCheckedException {
+        assert out != null;
+        assert obj != null;
+
+        try {
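+            // Hadoop serializers write to the stream they were opened with, so temporarily point
+            // the stream wrapper at the current DataOutput.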
+            currOut = (OutputStream)out;
+
+            serializer.serialize((T)obj);
+
+            currOut = null;
+        }
+        catch (IOException e) {
+            throw new IgniteCheckedException(e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public Object read(DataInput in, @Nullable Object obj) throws IgniteCheckedException {
+        assert in != null;
+
+        try {
+            currIn = (InputStream)in;
+
+            T res = deserializer.deserialize((T) obj);
+
+            currIn = null;
+
+            return res;
+        }
+        catch (IOException e) {
+            throw new IgniteCheckedException(e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void close() throws IgniteCheckedException {
+        try {
+            serializer.close();
+            deserializer.close();
+        }
+        catch (IOException e) {
+            throw new IgniteCheckedException(e);
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopShutdownHookManager.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopShutdownHookManager.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopShutdownHookManager.java
new file mode 100644
index 0000000..8bd71e0
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopShutdownHookManager.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v2;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * Fake manager for shutdown hooks.
+ */
+public class HadoopShutdownHookManager {
+    /** */
+    private static final HadoopShutdownHookManager MGR = new HadoopShutdownHookManager();
+
+    /**
+     * Return <code>ShutdownHookManager</code> singleton.
+     *
+     * @return <code>ShutdownHookManager</code> singleton.
+     */
+    public static HadoopShutdownHookManager get() {
+        return MGR;
+    }
+
+    /** */
+    private Set<Runnable> hooks = Collections.synchronizedSet(new HashSet<Runnable>());
+
+    /** */
+    private AtomicBoolean shutdownInProgress = new AtomicBoolean(false);
+
+    /**
+     * Singleton.
+     */
+    private HadoopShutdownHookManager() {
+        // No-op.
+    }
+
+    /**
+     * Adds a shutdownHook with a priority: the higher the priority,
+     * the earlier it will run. ShutdownHooks with the same priority run
+     * in a non-deterministic order.
+     *
+     * @param shutdownHook shutdownHook <code>Runnable</code>
+     * @param priority priority of the shutdownHook.
+     */
+    public void addShutdownHook(Runnable shutdownHook, int priority) {
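+        // Priority is ignored: this fake manager merely tracks hooks and never runs them on JVM shutdown.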
+        if (shutdownHook == null)
+            throw new IllegalArgumentException("shutdownHook cannot be NULL");
+
+        hooks.add(shutdownHook);
+    }
+
+    /**
+     * Removes a shutdownHook.
+     *
+     * @param shutdownHook shutdownHook to remove.
+     * @return TRUE if the shutdownHook was registered and removed,
+     * FALSE otherwise.
+     */
+    public boolean removeShutdownHook(Runnable shutdownHook) {
+        return hooks.remove(shutdownHook);
+    }
+
+    /**
+     * Indicates if a shutdownHook is registered or not.
+     *
+     * @param shutdownHook shutdownHook to check if registered.
+     * @return TRUE if the shutdownHook is registered, FALSE otherwise.
+     */
+    public boolean hasShutdownHook(Runnable shutdownHook) {
+        return hooks.contains(shutdownHook);
+    }
+
+    /**
+     * Indicates if shutdown is in progress or not.
+     *
+     * @return TRUE if the shutdown is in progress, otherwise FALSE.
+     */
+    public boolean isShutdownInProgress() {
+        return shutdownInProgress.get();
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSplitWrapper.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSplitWrapper.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSplitWrapper.java
new file mode 100644
index 0000000..df77adb
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSplitWrapper.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v2;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+/**
+ * The wrapper for native hadoop input splits.
+ *
+ * Warning!! This class must not depend on any Hadoop classes directly or indirectly.
+ */
+public class HadoopSplitWrapper extends HadoopInputSplit {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** Native hadoop input split. */
+    private byte[] bytes;
+
+    /** */
+    private String clsName;
+
+    /** Internal ID. */
+    private int id;
+
+    /**
+     * Creates new split wrapper.
+     */
+    public HadoopSplitWrapper() {
+        // No-op.
+    }
+
+    /**
+     * Creates new split wrapper.
+     *
+     * @param id Split ID.
+     * @param clsName Class name.
+     * @param bytes Serialized class.
+     * @param hosts Hosts where split is located.
+     */
+    public HadoopSplitWrapper(int id, String clsName, byte[] bytes, String[] hosts) {
+        assert hosts != null;
+        assert clsName != null;
+        assert bytes != null;
+
+        this.hosts = hosts;
+        this.id = id;
+
+        this.clsName = clsName;
+        this.bytes = bytes;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeExternal(ObjectOutput out) throws IOException {
+        out.writeInt(id);
+
+        out.writeUTF(clsName);
+        U.writeByteArray(out, bytes);
+    }
+
+    /**
+     * @return Class name.
+     */
+    public String className() {
+        return clsName;
+    }
+
+    /**
+     * @return Class bytes.
+     */
+    public byte[] bytes() {
+        return bytes;
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("unchecked")
+    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        id = in.readInt();
+
+        clsName = in.readUTF();
+        bytes = U.readByteArray(in);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object o) {
+        if (this == o)
+            return true;
+
+        if (o == null || getClass() != o.getClass())
+            return false;
+
+        HadoopSplitWrapper that = (HadoopSplitWrapper)o;
+
+        return id == that.id;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        return id;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2CleanupTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2CleanupTask.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2CleanupTask.java
new file mode 100644
index 0000000..abb904c
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2CleanupTask.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v2;
+
+import java.io.IOException;
+import org.apache.hadoop.mapred.JobContextImpl;
+import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.OutputFormat;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.IgniteInterruptedCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+
+/**
+ * Hadoop cleanup task (commits or aborts job).
+ */
+public class HadoopV2CleanupTask extends HadoopV2Task {
+    /** Abort flag. */
+    private final boolean abort;
+
+    /**
+     * @param taskInfo Task info.
+     * @param abort Abort flag.
+     */
+    public HadoopV2CleanupTask(HadoopTaskInfo taskInfo, boolean abort) {
+        super(taskInfo);
+
+        this.abort = abort;
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("ConstantConditions")
+    @Override public void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException {
+        JobContextImpl jobCtx = taskCtx.jobContext();
+
+        try {
+            OutputFormat outputFormat = getOutputFormat(jobCtx);
+
+            OutputCommitter committer = outputFormat.getOutputCommitter(hadoopContext());
+
+            if (committer != null) {
+                if (abort)
+                    committer.abortJob(jobCtx, JobStatus.State.FAILED);
+                else
+                    committer.commitJob(jobCtx);
+            }
+        }
+        catch (ClassNotFoundException | IOException e) {
+            throw new IgniteCheckedException(e);
+        }
+        catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+
+            throw new IgniteInterruptedCheckedException(e);
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Context.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Context.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Context.java
new file mode 100644
index 0000000..2ff2945
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Context.java
@@ -0,0 +1,243 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v2;
+
+import java.io.IOException;
+import java.util.Iterator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.MapContext;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.RecordWriter;
+import org.apache.hadoop.mapreduce.ReduceContext;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.lib.input.FileSplit;
+import org.apache.hadoop.mapreduce.task.JobContextImpl;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopFileBlock;
+import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskOutput;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopLongCounter;
+
+/**
+ * Hadoop context implementation for v2 API. It provides IO operations for hadoop tasks.
+ */
+public class HadoopV2Context extends JobContextImpl implements MapContext, ReduceContext {
+    /** Input reader that overrides the {@code HadoopTaskContext} input. */
+    private RecordReader reader;
+
+    /** Output writer that overrides the {@code HadoopTaskContext} output. */
+    private RecordWriter writer;
+
+    /** Output is provided by executor environment. */
+    private final HadoopTaskOutput output;
+
+    /** Input is provided by executor environment. */
+    private final HadoopTaskInput input;
+
+    /** Unique identifier for a task attempt. */
+    private final TaskAttemptID taskAttemptID;
+
+    /** Indicates that this task is to be cancelled. */
+    private volatile boolean cancelled;
+
+    /** Input split. */
+    private InputSplit inputSplit;
+
+    /** */
+    private final HadoopTaskContext ctx;
+
+    /** */
+    private String status;
+
+    /**
+     * @param ctx Context for IO operations.
+     */
+    public HadoopV2Context(HadoopV2TaskContext ctx) {
+        super(ctx.jobConf(), ctx.jobContext().getJobID());
+
+        taskAttemptID = ctx.attemptId();
+
+        conf.set("mapreduce.job.id", taskAttemptID.getJobID().toString());
+        conf.set("mapreduce.task.id", taskAttemptID.getTaskID().toString());
+
+        output = ctx.output();
+        input = ctx.input();
+
+        this.ctx = ctx;
+    }
+
+    /** {@inheritDoc} */
+    @Override public InputSplit getInputSplit() {
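+        // Lazily reconstruct the native Hadoop split from the Ignite split on first access.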
+        if (inputSplit == null) {
+            HadoopInputSplit split = ctx.taskInfo().inputSplit();
+
+            if (split == null)
+                return null;
+
+            if (split instanceof HadoopFileBlock) {
+                HadoopFileBlock fileBlock = (HadoopFileBlock)split;
+
+                inputSplit = new FileSplit(new Path(fileBlock.file()), fileBlock.start(), fileBlock.length(), null);
+            }
+            else {
+                try {
+                    inputSplit = (InputSplit)((HadoopV2TaskContext)ctx).getNativeSplit(split);
+                }
+                catch (IgniteCheckedException e) {
+                    throw new IllegalStateException(e);
+                }
+            }
+        }
+
+        return inputSplit;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean nextKeyValue() throws IOException, InterruptedException {
+        if (cancelled)
+            throw new HadoopTaskCancelledException("Task cancelled.");
+
+        return reader.nextKeyValue();
+    }
+
+    /** {@inheritDoc} */
+    @Override public Object getCurrentKey() throws IOException, InterruptedException {
+        if (reader != null)
+            return reader.getCurrentKey();
+
+        return input.key();
+    }
+
+    /** {@inheritDoc} */
+    @Override public Object getCurrentValue() throws IOException, InterruptedException {
+        return reader.getCurrentValue();
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("unchecked")
+    @Override public void write(Object key, Object val) throws IOException, InterruptedException {
+        if (cancelled)
+            throw new HadoopTaskCancelledException("Task cancelled.");
+
+        if (writer != null)
+            writer.write(key, val);
+        else {
+            try {
+                output.write(key, val);
+            }
+            catch (IgniteCheckedException e) {
+                throw new IOException(e);
+            }
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public OutputCommitter getOutputCommitter() {
+        throw new UnsupportedOperationException();
+    }
+
+    /** {@inheritDoc} */
+    @Override public TaskAttemptID getTaskAttemptID() {
+        return taskAttemptID;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setStatus(String msg) {
+        status = msg;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String getStatus() {
+        return status;
+    }
+
+    /** {@inheritDoc} */
+    @Override public float getProgress() {
+        return 0.5f; // TODO
+    }
+
+    /** {@inheritDoc} */
+    @Override public Counter getCounter(Enum<?> cntrName) {
+        return getCounter(cntrName.getDeclaringClass().getName(), cntrName.name());
+    }
+
+    /** {@inheritDoc} */
+    @Override public Counter getCounter(String grpName, String cntrName) {
+        return new HadoopV2Counter(ctx.counter(grpName, cntrName, HadoopLongCounter.class));
+    }
+
+    /** {@inheritDoc} */
+    @Override public void progress() {
+        // No-op.
+    }
+
+    /**
+     * Overrides default input data reader.
+     *
+     * @param reader New reader.
+     */
+    public void reader(RecordReader reader) {
+        this.reader = reader;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean nextKey() throws IOException, InterruptedException {
+        if (cancelled)
+            throw new HadoopTaskCancelledException("Task cancelled.");
+
+        return input.next();
+    }
+
+    /** {@inheritDoc} */
+    @Override public Iterable getValues() throws IOException, InterruptedException {
+        return new Iterable() {
+            @Override public Iterator iterator() {
+                return input.values();
+            }
+        };
+    }
+
+    /**
+     * @return Overridden output data writer.
+     */
+    public RecordWriter writer() {
+        return writer;
+    }
+
+    /**
+     * Overrides default output data writer.
+     *
+     * @param writer New writer.
+     */
+    public void writer(RecordWriter writer) {
+        this.writer = writer;
+    }
+
+    /**
+     * Cancels the task by stopping the IO.
+     */
+    public void cancel() {
+        cancelled = true;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Counter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Counter.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Counter.java
new file mode 100644
index 0000000..cad9e64
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Counter.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v2;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopLongCounter;
+
+/**
+ * Adapter from the internal counter implementation to the Hadoop API Counter of version 2.0.
+ */
+public class HadoopV2Counter implements Counter {
+    /** Delegate. */
+    private final HadoopLongCounter cntr;
+
+    /**
+     * Creates new instance with given delegate.
+     *
+     * @param cntr Internal counter.
+     */
+    public HadoopV2Counter(HadoopLongCounter cntr) {
+        assert cntr != null : "counter must be non-null";
+
+        this.cntr = cntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setDisplayName(String displayName) {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public String getName() {
+        return cntr.name();
+    }
+
+    /** {@inheritDoc} */
+    @Override public String getDisplayName() {
+        return getName();
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getValue() {
+        return cntr.value();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setValue(long val) {
+        cntr.value(val);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void increment(long incr) {
+        cntr.increment(incr);
+    }
+
+    /** {@inheritDoc} */
+    @Override public Counter getUnderlyingCounter() {
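+        // This adapter is itself the leaf counter, so no unwrapping is needed.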
+        return this;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void write(DataOutput out) throws IOException {
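+        // Writable serialization is not supported for this adapter.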
+        throw new UnsupportedOperationException("not implemented");
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readFields(DataInput in) throws IOException {
+        throw new UnsupportedOperationException("not implemented");
+    }
+}
\ No newline at end of file


[20/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v2/IgniteHadoopFileSystem.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v2/IgniteHadoopFileSystem.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v2/IgniteHadoopFileSystem.java
deleted file mode 100644
index bd8ed2d..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v2/IgniteHadoopFileSystem.java
+++ /dev/null
@@ -1,1076 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.hadoop.fs.v2;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.AbstractFileSystem;
-import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileChecksum;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsServerDefaults;
-import org.apache.hadoop.fs.FsStatus;
-import org.apache.hadoop.fs.InvalidPathException;
-import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.util.DataChecksum;
-import org.apache.hadoop.util.Progressable;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.configuration.FileSystemConfiguration;
-import org.apache.ignite.hadoop.fs.HadoopFileSystemFactory;
-import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
-import org.apache.ignite.igfs.IgfsBlockLocation;
-import org.apache.ignite.igfs.IgfsFile;
-import org.apache.ignite.igfs.IgfsMode;
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.internal.igfs.common.IgfsLogger;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsEndpoint;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsInputStream;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsOutputStream;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsProxyInputStream;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsProxyOutputStream;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsStreamDelegate;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsWrapper;
-import org.apache.ignite.internal.processors.igfs.IgfsHandshakeResponse;
-import org.apache.ignite.internal.processors.igfs.IgfsModeResolver;
-import org.apache.ignite.internal.processors.igfs.IgfsPaths;
-import org.apache.ignite.internal.processors.igfs.IgfsStatus;
-import org.apache.ignite.internal.processors.igfs.IgfsUtils;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.T2;
-import org.apache.ignite.internal.util.typedef.internal.A;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lifecycle.LifecycleAware;
-import org.jetbrains.annotations.Nullable;
-
-import java.io.BufferedOutputStream;
-import java.io.Closeable;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import static org.apache.ignite.configuration.FileSystemConfiguration.DFLT_IGFS_LOG_BATCH_SIZE;
-import static org.apache.ignite.configuration.FileSystemConfiguration.DFLT_IGFS_LOG_DIR;
-import static org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.getFsHadoopUser;
-import static org.apache.ignite.igfs.IgfsMode.PROXY;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_COLOCATED_WRITES;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_LOG_BATCH_SIZE;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_LOG_DIR;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_LOG_ENABLED;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_PREFER_LOCAL_WRITES;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH;
-import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.parameter;
-import static org.apache.ignite.internal.processors.igfs.IgfsEx.IGFS_SCHEME;
-
-/**
- * {@code IGFS} Hadoop 2.x file system driver over file system API. To use
- * {@code IGFS} as Hadoop file system, you should configure this class
- * in Hadoop's {@code core-site.xml} as follows:
- * <pre name="code" class="xml">
- *  &lt;property&gt;
- *      &lt;name&gt;fs.default.name&lt;/name&gt;
- *      &lt;value&gt;igfs://ipc&lt;/value&gt;
- *  &lt;/property&gt;
- *
- *  &lt;property&gt;
- *      &lt;name&gt;fs.igfs.impl&lt;/name&gt;
- *      &lt;value&gt;org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem&lt;/value&gt;
- *  &lt;/property&gt;
- * </pre>
- * You should also add Ignite JAR and all libraries to Hadoop classpath. To
- * do this, add following lines to {@code conf/hadoop-env.sh} script in Hadoop
- * distribution:
- * <pre name="code" class="bash">
- * export IGNITE_HOME=/path/to/Ignite/distribution
- * export HADOOP_CLASSPATH=$IGNITE_HOME/ignite*.jar
- *
- * for f in $IGNITE_HOME/libs/*.jar; do
- *  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f;
- * done
- * </pre>
- * <h1 class="header">Data vs Client Nodes</h1>
- * Hadoop needs to use its FileSystem remotely from client nodes as well as directly on
- * data nodes. Client nodes are responsible for basic file system operations as well as
- * accessing data nodes remotely. Usually, client nodes are started together
- * with {@code job-submitter} or {@code job-scheduler} processes, while data nodes are usually
- * started together with Hadoop {@code task-tracker} processes.
- * <p>
- * For sample client and data node configuration refer to {@code config/hadoop/default-config-client.xml}
- * and {@code config/hadoop/default-config.xml} configuration files in Ignite installation.
- */
-public class IgniteHadoopFileSystem extends AbstractFileSystem implements Closeable {
-    /** Logger. */
-    private static final Log LOG = LogFactory.getLog(IgniteHadoopFileSystem.class);
-
-    /** Ensures that close routine is invoked at most once. */
-    private final AtomicBoolean closeGuard = new AtomicBoolean();
-
-    /** Grid remote client. */
-    private HadoopIgfsWrapper rmtClient;
-
-    /** The name of the user on whose behalf this file system was created. */
-    private final String user;
-
-    /** Working directory. */
-    private IgfsPath workingDir;
-
-    /** URI. */
-    private final URI uri;
-
-    /** Authority. */
-    private String uriAuthority;
-
-    /** Client logger. */
-    private IgfsLogger clientLog;
-
-    /** Server block size. */
-    private long grpBlockSize;
-
-    /** Default replication factor. */
-    private short dfltReplication;
-
-    /** Secondary URI string. */
-    private URI secondaryUri;
-
-    /** Mode resolver. */
-    private IgfsModeResolver modeRslvr;
-
-    /** The secondary file system factory. */
-    private HadoopFileSystemFactory factory;
-
-    /** Whether custom sequential reads before prefetch value is provided. */
-    private boolean seqReadsBeforePrefetchOverride;
-
-    /** Custom-provided sequential reads before prefetch. */
-    private int seqReadsBeforePrefetch;
-
-    /** Flag that controls whether file writes should be colocated on data node. */
-    private boolean colocateFileWrites;
-
-    /** Prefer local writes. */
-    private boolean preferLocFileWrites;
-
-    /**
-     * @param name URI for file system.
-     * @param cfg Configuration.
-     * @throws URISyntaxException if name has invalid syntax.
-     * @throws IOException If initialization failed.
-     */
-    public IgniteHadoopFileSystem(URI name, Configuration cfg) throws URISyntaxException, IOException {
-        super(HadoopIgfsEndpoint.normalize(name), IGFS_SCHEME, false, -1);
-
-        uri = name;
-
-        user = getFsHadoopUser();
-
-        try {
-            initialize(name, cfg);
-        }
-        catch (IOException e) {
-            // Close client if exception occurred.
-            if (rmtClient != null)
-                rmtClient.close(false);
-
-            throw e;
-        }
-
-        workingDir = new IgfsPath("/user/" + user);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void checkPath(Path path) {
-        URI uri = path.toUri();
-
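-        // Relative paths are always accepted; absolute paths must match this file system's scheme and authority.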
-        if (uri.isAbsolute()) {
-            if (!F.eq(uri.getScheme(), IGFS_SCHEME))
-                throw new InvalidPathException("Wrong path scheme [expected=" + IGFS_SCHEME + ", actual=" +
-                    uri.getScheme() + ']');
-
-            if (!F.eq(uri.getAuthority(), uriAuthority))
-                throw new InvalidPathException("Wrong path authority [expected=" + uriAuthority + ", actual=" +
-                    uri.getAuthority() + ']');
-        }
-    }
-
-    /**
-     * Public setter that can be used by direct users of FS or Visor.
-     *
-     * @param colocateFileWrites Whether all ongoing file writes should be colocated.
-     */
-    @SuppressWarnings("UnusedDeclaration")
-    public void colocateFileWrites(boolean colocateFileWrites) {
-        this.colocateFileWrites = colocateFileWrites;
-    }
-
-    /**
-     * Enter busy state.
-     *
-     * @throws IOException If file system is stopped.
-     */
-    private void enterBusy() throws IOException {
-        if (closeGuard.get())
-            throw new IOException("File system is stopped.");
-    }
-
-    /**
-     * Leave busy state.
-     */
-    private void leaveBusy() {
-        // No-op.
-    }
-
-    /**
-     * @param name URI passed to constructor.
-     * @param cfg Configuration passed to constructor.
-     * @throws IOException If initialization failed.
-     */
-    @SuppressWarnings("ConstantConditions")
-    private void initialize(URI name, Configuration cfg) throws IOException {
-        enterBusy();
-
-        try {
-            if (rmtClient != null)
-                throw new IOException("File system is already initialized: " + rmtClient);
-
-            A.notNull(name, "name");
-            A.notNull(cfg, "cfg");
-
-            if (!IGFS_SCHEME.equals(name.getScheme()))
-                throw new IOException("Illegal file system URI [expected=" + IGFS_SCHEME +
-                    "://[name]/[optional_path], actual=" + name + ']');
-
-            uriAuthority = name.getAuthority();
-
-            // Override sequential reads before prefetch if needed.
-            seqReadsBeforePrefetch = parameter(cfg, PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, uriAuthority, 0);
-
-            if (seqReadsBeforePrefetch > 0)
-                seqReadsBeforePrefetchOverride = true;
-
-            // In Ignite the replication factor is controlled by data cache affinity.
-            // We use the replication factor to force the whole file to be stored on the local node.
-            dfltReplication = (short)cfg.getInt("dfs.replication", 3);
-
-            // Get file colocation control flag.
-            colocateFileWrites = parameter(cfg, PARAM_IGFS_COLOCATED_WRITES, uriAuthority, false);
-            preferLocFileWrites = cfg.getBoolean(PARAM_IGFS_PREFER_LOCAL_WRITES, false);
-
-            // Get log directory.
-            String logDirCfg = parameter(cfg, PARAM_IGFS_LOG_DIR, uriAuthority, DFLT_IGFS_LOG_DIR);
-
-            File logDirFile = U.resolveIgnitePath(logDirCfg);
-
-            String logDir = logDirFile != null ? logDirFile.getAbsolutePath() : null;
-
-            rmtClient = new HadoopIgfsWrapper(uriAuthority, logDir, cfg, LOG, user);
-
-            // Handshake.
-            IgfsHandshakeResponse handshake = rmtClient.handshake(logDir);
-
-            grpBlockSize = handshake.blockSize();
-
-            IgfsPaths paths = handshake.secondaryPaths();
-
-            Boolean logEnabled = parameter(cfg, PARAM_IGFS_LOG_ENABLED, uriAuthority, false);
-
-            if (handshake.sampling() != null ? handshake.sampling() : logEnabled) {
-                // Initiate client logger.
-                if (logDir == null)
-                    throw new IOException("Failed to resolve log directory: " + logDirCfg);
-
-                Integer batchSize = parameter(cfg, PARAM_IGFS_LOG_BATCH_SIZE, uriAuthority, DFLT_IGFS_LOG_BATCH_SIZE);
-
-                clientLog = IgfsLogger.logger(uriAuthority, handshake.igfsName(), logDir, batchSize);
-            }
-            else
-                clientLog = IgfsLogger.disabledLogger();
-
-            try {
-                modeRslvr = new IgfsModeResolver(paths.defaultMode(), paths.pathModes());
-            }
-            catch (IgniteCheckedException ice) {
-                throw new IOException(ice);
-            }
-
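-            // A secondary file system is required only if at least one path operates in PROXY mode.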
-            boolean initSecondary = paths.defaultMode() == PROXY;
-
-            if (!initSecondary && paths.pathModes() != null) {
-                for (T2<IgfsPath, IgfsMode> pathMode : paths.pathModes()) {
-                    IgfsMode mode = pathMode.getValue();
-
-                    if (mode == PROXY) {
-                        initSecondary = true;
-
-                        break;
-                    }
-                }
-            }
-
-            if (initSecondary) {
-                try {
-                    factory = (HadoopFileSystemFactory) paths.getPayload(getClass().getClassLoader());
-                }
-                catch (IgniteCheckedException e) {
-                    throw new IOException("Failed to get secondary file system factory.", e);
-                }
-
-                if (factory == null)
-                    throw new IOException("Failed to get secondary file system factory (did you set " +
-                        IgniteHadoopIgfsSecondaryFileSystem.class.getName() + " as \"secondaryFileSystem\" in " +
-                        FileSystemConfiguration.class.getName() + "?)");
-
-                assert factory != null;
-
-                if (factory instanceof LifecycleAware)
-                    ((LifecycleAware) factory).start();
-
-                try {
-                    FileSystem secFs = factory.get(user);
-
-                    secondaryUri = secFs.getUri();
-
-                    A.ensure(secondaryUri != null, "Secondary file system URI should not be null.");
-                }
-                catch (IOException e) {
-                    throw new IOException("Failed to connect to the secondary file system: " + secondaryUri, e);
-                }
-            }
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void close() throws IOException {
-        if (closeGuard.compareAndSet(false, true)) {
-            if (rmtClient == null)
-                return;
-
-            rmtClient.close(false);
-
-            if (clientLog.isLogEnabled())
-                clientLog.close();
-
-            if (factory instanceof LifecycleAware)
-                ((LifecycleAware) factory).stop();
-
-            // Reset initialized resources.
-            rmtClient = null;
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public URI getUri() {
-        return uri;
-    }
-
-    /** {@inheritDoc} */
-    @Override public int getUriDefaultPort() {
-        return -1;
-    }
-
-    /** {@inheritDoc} */
-    @Override public FsServerDefaults getServerDefaults() throws IOException {
-        return new FsServerDefaults(grpBlockSize, (int)grpBlockSize, (int)grpBlockSize, dfltReplication, 64 * 1024,
-            false, 0, DataChecksum.Type.NULL);
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean setReplication(Path f, short replication) throws IOException {
-        return mode(f) == PROXY && secondaryFileSystem().setReplication(f, replication);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void setTimes(Path f, long mtime, long atime) throws IOException {
-        if (mode(f) == PROXY)
-            secondaryFileSystem().setTimes(f, mtime, atime);
-        else {
-            if (mtime == -1 && atime == -1)
-                return;
-
-            rmtClient.setTimes(convert(f), atime, mtime);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public FsStatus getFsStatus() throws IOException {
-        IgfsStatus status = rmtClient.fsStatus();
-
-        return new FsStatus(status.spaceTotal(), status.spaceUsed(), status.spaceTotal() - status.spaceUsed());
-    }
-
-    /** {@inheritDoc} */
-    @Override public void setPermission(Path p, FsPermission perm) throws IOException {
-        enterBusy();
-
-        try {
-            A.notNull(p, "p");
-
-            if (mode(p) == PROXY)
-                secondaryFileSystem().setPermission(toSecondary(p), perm);
-            else {
-                if (rmtClient.update(convert(p), permission(perm)) == null)
-                    throw new IOException("Failed to set file permission (file not found?)" +
-                        " [path=" + p + ", perm=" + perm + ']');
-            }
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void setOwner(Path p, String usr, String grp) throws IOException {
-        A.notNull(p, "p");
-        A.notNull(usr, "username");
-        A.notNull(grp, "grpName");
-
-        enterBusy();
-
-        try {
-            if (mode(p) == PROXY)
-                secondaryFileSystem().setOwner(toSecondary(p), usr, grp);
-            else if (rmtClient.update(convert(p), F.asMap(IgfsUtils.PROP_USER_NAME, usr,
-                IgfsUtils.PROP_GROUP_NAME, grp)) == null) {
-                throw new IOException("Failed to set file owner (file not found?)" +
-                    " [path=" + p + ", username=" + usr + ", grpName=" + grp + ']');
-            }
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public FSDataInputStream open(Path f, int bufSize) throws IOException {
-        A.notNull(f, "f");
-
-        enterBusy();
-
-        try {
-            IgfsPath path = convert(f);
-            IgfsMode mode = modeRslvr.resolveMode(path);
-
-            if (mode == PROXY) {
-                FSDataInputStream is = secondaryFileSystem().open(toSecondary(f), bufSize);
-
-                if (clientLog.isLogEnabled()) {
-                    // At this point we do not know the file size, so we perform an additional request to the remote FS to get it.
-                    FileStatus status = secondaryFileSystem().getFileStatus(toSecondary(f));
-
-                    long size = status != null ? status.getLen() : -1;
-
-                    long logId = IgfsLogger.nextId();
-
-                    clientLog.logOpen(logId, path, PROXY, bufSize, size);
-
-                    return new FSDataInputStream(new HadoopIgfsProxyInputStream(is, clientLog, logId));
-                }
-                else
-                    return is;
-            }
-            else {
-                HadoopIgfsStreamDelegate stream = seqReadsBeforePrefetchOverride ?
-                    rmtClient.open(path, seqReadsBeforePrefetch) : rmtClient.open(path);
-
-                long logId = -1;
-
-                if (clientLog.isLogEnabled()) {
-                    logId = IgfsLogger.nextId();
-
-                    clientLog.logOpen(logId, path, mode, bufSize, stream.length());
-                }
-
-                if (LOG.isDebugEnabled())
-                    LOG.debug("Opening input stream [thread=" + Thread.currentThread().getName() + ", path=" + path +
-                        ", bufSize=" + bufSize + ']');
-
-                HadoopIgfsInputStream igfsIn = new HadoopIgfsInputStream(stream, stream.length(),
-                    bufSize, LOG, clientLog, logId);
-
-                if (LOG.isDebugEnabled())
-                    LOG.debug("Opened input stream [path=" + path + ", delegate=" + stream + ']');
-
-                return new FSDataInputStream(igfsIn);
-            }
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("deprecation")
-    @Override public FSDataOutputStream createInternal(
-        Path f,
-        EnumSet<CreateFlag> flag,
-        FsPermission perm,
-        int bufSize,
-        short replication,
-        long blockSize,
-        Progressable progress,
-        Options.ChecksumOpt checksumOpt,
-        boolean createParent
-    ) throws IOException {
-        A.notNull(f, "f");
-
-        enterBusy();
-
-        boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
-        boolean append = flag.contains(CreateFlag.APPEND);
-        boolean create = flag.contains(CreateFlag.CREATE);
-
-        OutputStream out = null;
-
-        try {
-            IgfsPath path = convert(f);
-            IgfsMode mode = modeRslvr.resolveMode(path);
-
-            if (LOG.isDebugEnabled())
-                LOG.debug("Opening output stream in create [thread=" + Thread.currentThread().getName() + ", path=" +
-                    path + ", overwrite=" + overwrite + ", bufSize=" + bufSize + ']');
-
-            if (mode == PROXY) {
-                FSDataOutputStream os = secondaryFileSystem().create(toSecondary(f), perm, flag, bufSize,
-                    replication, blockSize, progress);
-
-                if (clientLog.isLogEnabled()) {
-                    long logId = IgfsLogger.nextId();
-
-                    if (append)
-                        clientLog.logAppend(logId, path, PROXY, bufSize); // Don't have stream ID.
-                    else
-                        clientLog.logCreate(logId, path, PROXY, overwrite, bufSize, replication, blockSize);
-
-                    return new FSDataOutputStream(new HadoopIgfsProxyOutputStream(os, clientLog, logId));
-                }
-                else
-                    return os;
-            }
-            else {
-                Map<String, String> permMap = F.asMap(IgfsUtils.PROP_PERMISSION, toString(perm),
-                    IgfsUtils.PROP_PREFER_LOCAL_WRITES, Boolean.toString(preferLocFileWrites));
-
-                // Create the stream and close it in the 'finally' section if any subsequent operation fails.
-                HadoopIgfsStreamDelegate stream;
-
-                long logId = -1;
-
-                if (append) {
-                    stream = rmtClient.append(path, create, permMap);
-
-                    if (clientLog.isLogEnabled()) {
-                        logId = IgfsLogger.nextId();
-
-                        clientLog.logAppend(logId, path, mode, bufSize);
-                    }
-
-                    if (LOG.isDebugEnabled())
-                        LOG.debug("Opened output stream in append [path=" + path + ", delegate=" + stream + ']');
-                }
-                else {
-                    stream = rmtClient.create(path, overwrite, colocateFileWrites, replication, blockSize,
-                        permMap);
-
-                    if (clientLog.isLogEnabled()) {
-                        logId = IgfsLogger.nextId();
-
-                        clientLog.logCreate(logId, path, mode, overwrite, bufSize, replication, blockSize);
-                    }
-
-                    if (LOG.isDebugEnabled())
-                        LOG.debug("Opened output stream in create [path=" + path + ", delegate=" + stream + ']');
-                }
-
-                assert stream != null;
-
-                HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG,
-                    clientLog, logId);
-
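-                // Enforce a minimum buffer size of 64KB.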
-                bufSize = Math.max(64 * 1024, bufSize);
-
-                out = new BufferedOutputStream(igfsOut, bufSize);
-
-                FSDataOutputStream res = new FSDataOutputStream(out, null, 0);
-
-                // Mark stream created successfully.
-                out = null;
-
-                return res;
-            }
-        }
-        finally {
-            // Close if failed during stream creation.
-            if (out != null)
-                U.closeQuiet(out);
-
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean supportsSymlinks() {
-        return false;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void renameInternal(Path src, Path dst) throws IOException {
-        A.notNull(src, "src");
-        A.notNull(dst, "dst");
-
-        enterBusy();
-
-        try {
-            IgfsPath srcPath = convert(src);
-            IgfsPath dstPath = convert(dst);
-
-            IgfsMode srcMode = modeRslvr.resolveMode(srcPath);
-
-            if (clientLog.isLogEnabled())
-                clientLog.logRename(srcPath, srcMode, dstPath);
-
-            if (srcMode == PROXY)
-                secondaryFileSystem().rename(toSecondary(src), toSecondary(dst));
-            else
-                rmtClient.rename(srcPath, dstPath);
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean delete(Path f, boolean recursive) throws IOException {
-        A.notNull(f, "f");
-
-        enterBusy();
-
-        try {
-            IgfsPath path = convert(f);
-
-            IgfsMode mode = modeRslvr.resolveMode(path);
-
-            if (mode == PROXY) {
-                if (clientLog.isLogEnabled())
-                    clientLog.logDelete(path, PROXY, recursive);
-
-                return secondaryFileSystem().delete(toSecondary(f), recursive);
-            }
-
-            boolean res = rmtClient.delete(path, recursive);
-
-            if (clientLog.isLogEnabled())
-                clientLog.logDelete(path, mode, recursive);
-
-            return res;
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void setVerifyChecksum(boolean verifyChecksum) throws IOException {
-        // Checksum has effect for secondary FS only.
-        if (factory != null)
-            secondaryFileSystem().setVerifyChecksum(verifyChecksum);
-    }
-
-    /** {@inheritDoc} */
-    @Override public FileChecksum getFileChecksum(Path f) throws IOException {
-        if (mode(f) == PROXY)
-            return secondaryFileSystem().getFileChecksum(f);
-
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public FileStatus[] listStatus(Path f) throws IOException {
-        A.notNull(f, "f");
-
-        enterBusy();
-
-        try {
-            IgfsPath path = convert(f);
-            IgfsMode mode = modeRslvr.resolveMode(path);
-
-            if (mode == PROXY) {
-                FileStatus[] arr = secondaryFileSystem().listStatus(toSecondary(f));
-
-                if (arr == null)
-                    throw new FileNotFoundException("File " + f + " does not exist.");
-
-                for (int i = 0; i < arr.length; i++)
-                    arr[i] = toPrimary(arr[i]);
-
-                if (clientLog.isLogEnabled()) {
-                    String[] fileArr = new String[arr.length];
-
-                    for (int i = 0; i < arr.length; i++)
-                        fileArr[i] = arr[i].getPath().toString();
-
-                    clientLog.logListDirectory(path, PROXY, fileArr);
-                }
-
-                return arr;
-            }
-            else {
-                Collection<IgfsFile> list = rmtClient.listFiles(path);
-
-                if (list == null)
-                    throw new FileNotFoundException("File " + f + " does not exist.");
-
-                List<IgfsFile> files = new ArrayList<>(list);
-
-                FileStatus[] arr = new FileStatus[files.size()];
-
-                for (int i = 0; i < arr.length; i++)
-                    arr[i] = convert(files.get(i));
-
-                if (clientLog.isLogEnabled()) {
-                    String[] fileArr = new String[arr.length];
-
-                    for (int i = 0; i < arr.length; i++)
-                        fileArr[i] = arr[i].getPath().toString();
-
-                    clientLog.logListDirectory(path, mode, fileArr);
-                }
-
-                return arr;
-            }
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void mkdir(Path f, FsPermission perm, boolean createParent) throws IOException {
-        A.notNull(f, "f");
-
-        enterBusy();
-
-        try {
-            IgfsPath path = convert(f);
-            IgfsMode mode = modeRslvr.resolveMode(path);
-
-            if (mode == PROXY) {
-                if (clientLog.isLogEnabled())
-                    clientLog.logMakeDirectory(path, PROXY);
-
-                secondaryFileSystem().mkdirs(toSecondary(f), perm);
-            }
-            else {
-                rmtClient.mkdirs(path, permission(perm));
-
-                if (clientLog.isLogEnabled())
-                    clientLog.logMakeDirectory(path, mode);
-            }
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public FileStatus getFileStatus(Path f) throws IOException {
-        A.notNull(f, "f");
-
-        enterBusy();
-
-        try {
-            if (mode(f) == PROXY)
-                return toPrimary(secondaryFileSystem().getFileStatus(toSecondary(f)));
-            else {
-                IgfsFile info = rmtClient.info(convert(f));
-
-                if (info == null)
-                    throw new FileNotFoundException("File not found: " + f);
-
-                return convert(info);
-            }
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public BlockLocation[] getFileBlockLocations(Path path, long start, long len) throws IOException {
-        A.notNull(path, "path");
-
-        IgfsPath igfsPath = convert(path);
-
-        enterBusy();
-
-        try {
-            if (modeRslvr.resolveMode(igfsPath) == PROXY)
-                return secondaryFileSystem().getFileBlockLocations(path, start, len);
-            else {
-                long now = System.currentTimeMillis();
-
-                List<IgfsBlockLocation> affinity = new ArrayList<>(
-                    rmtClient.affinity(igfsPath, start, len));
-
-                BlockLocation[] arr = new BlockLocation[affinity.size()];
-
-                for (int i = 0; i < arr.length; i++)
-                    arr[i] = convert(affinity.get(i));
-
-                if (LOG.isDebugEnabled())
-                    LOG.debug("Fetched file locations [path=" + path + ", fetchTime=" +
-                        (System.currentTimeMillis() - now) + ", locations=" + Arrays.asList(arr) + ']');
-
-                return arr;
-            }
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /**
-     * Resolve path mode.
-     *
-     * @param path HDFS path.
-     * @return Path mode.
-     */
-    public IgfsMode mode(Path path) {
-        return modeRslvr.resolveMode(convert(path));
-    }
-
-    /**
-     * Convert the given path to path acceptable by the primary file system.
-     *
-     * @param path Path.
-     * @return Primary file system path.
-     */
-    private Path toPrimary(Path path) {
-        return convertPath(path, getUri());
-    }
-
-    /**
-     * Convert the given path to path acceptable by the secondary file system.
-     *
-     * @param path Path.
-     * @return Secondary file system path.
-     */
-    private Path toSecondary(Path path) {
-        assert factory != null;
-        assert secondaryUri != null;
-
-        return convertPath(path, secondaryUri);
-    }
-
-    /**
-     * Convert path using the given new URI.
-     *
-     * @param path Old path.
-     * @param newUri New URI.
-     * @return New path.
-     */
-    private Path convertPath(Path path, URI newUri) {
-        assert newUri != null;
-
-        if (path != null) {
-            URI pathUri = path.toUri();
-
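-            // Substitute the new URI's scheme and authority, preserving them only where the original path defined them.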
-            try {
-                return new Path(new URI(pathUri.getScheme() != null ? newUri.getScheme() : null,
-                    pathUri.getAuthority() != null ? newUri.getAuthority() : null, pathUri.getPath(), null, null));
-            }
-            catch (URISyntaxException e) {
-                throw new IgniteException("Failed to construct secondary file system path from the primary file " +
-                    "system path: " + path, e);
-            }
-        }
-        else
-            return null;
-    }
-
-    /**
-     * Convert a file status obtained from the secondary file system to a status of the primary file system.
-     *
-     * @param status Secondary file system status.
-     * @return Primary file system status.
-     */
-    private FileStatus toPrimary(FileStatus status) {
-        return status != null ? new FileStatus(status.getLen(), status.isDirectory(), status.getReplication(),
-            status.getBlockSize(), status.getModificationTime(), status.getAccessTime(), status.getPermission(),
-            status.getOwner(), status.getGroup(), toPrimary(status.getPath())) : null;
-    }
-
-    /**
-     * Convert IGFS path into Hadoop path.
-     *
-     * @param path IGFS path.
-     * @return Hadoop path.
-     */
-    private Path convert(IgfsPath path) {
-        return new Path(IGFS_SCHEME, uriAuthority, path.toString());
-    }
-
-    /**
-     * Convert Hadoop path into IGFS path.
-     *
-     * @param path Hadoop path.
-     * @return IGFS path.
-     */
-    @Nullable private IgfsPath convert(Path path) {
-        if (path == null)
-            return null;
-
-        return path.isAbsolute() ? new IgfsPath(path.toUri().getPath()) :
-            new IgfsPath(workingDir, path.toUri().getPath());
-    }
-
-    /**
-     * Convert IGFS affinity block location into Hadoop affinity block location.
-     *
-     * @param block IGFS affinity block location.
-     * @return Hadoop affinity block location.
-     */
-    private BlockLocation convert(IgfsBlockLocation block) {
-        Collection<String> names = block.names();
-        Collection<String> hosts = block.hosts();
-
-        return new BlockLocation(
-            names.toArray(new String[names.size()]) /* hostname:portNumber of data nodes */,
-            hosts.toArray(new String[hosts.size()]) /* hostnames of data nodes */,
-            block.start(), block.length()
-        ) {
-            @Override public String toString() {
-                try {
-                    return "BlockLocation [offset=" + getOffset() + ", length=" + getLength() +
-                        ", hosts=" + Arrays.asList(getHosts()) + ", names=" + Arrays.asList(getNames()) + ']';
-                }
-                catch (IOException e) {
-                    throw new RuntimeException(e);
-                }
-            }
-        };
-    }
-
-    /**
-     * Convert IGFS file information into Hadoop file status.
-     *
-     * @param file IGFS file information.
-     * @return Hadoop file status.
-     */
-    private FileStatus convert(IgfsFile file) {
-        return new FileStatus(
-            file.length(),
-            file.isDirectory(),
-            dfltReplication,
-            file.groupBlockSize(),
-            file.modificationTime(),
-            file.accessTime(),
-            permission(file),
-            file.property(IgfsUtils.PROP_USER_NAME, user),
-            file.property(IgfsUtils.PROP_GROUP_NAME, "users"),
-            convert(file.path())) {
-            @Override public String toString() {
-                return "FileStatus [path=" + getPath() + ", isDir=" + isDirectory() + ", len=" + getLen() + "]";
-            }
-        };
-    }
-
-    /**
-     * Convert Hadoop permission into IGFS file attribute.
-     *
-     * @param perm Hadoop permission.
-     * @return IGFS attributes.
-     */
-    private Map<String, String> permission(FsPermission perm) {
-        if (perm == null)
-            perm = FsPermission.getDefault();
-
-        return F.asMap(IgfsUtils.PROP_PERMISSION, toString(perm));
-    }
-
-    /**
-     * @param perm Permission.
-     * @return String.
-     */
-    private static String toString(FsPermission perm) {
-        return String.format("%04o", perm.toShort());
-    }
-
-    /**
-     * Convert IGFS file attributes into Hadoop permission.
-     *
-     * @param file File info.
-     * @return Hadoop permission.
-     */
-    private FsPermission permission(IgfsFile file) {
-        String perm = file.property(IgfsUtils.PROP_PERMISSION, null);
-
-        if (perm == null)
-            return FsPermission.getDefault();
-
-        try {
-            return new FsPermission((short)Integer.parseInt(perm, 8));
-        }
-        catch (NumberFormatException ignore) {
-            return FsPermission.getDefault();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(IgniteHadoopFileSystem.class, this);
-    }
-
-    /**
-     * Returns the name of the user on whose behalf this file system was created.
-     *
-     * @return The user name.
-     */
-    public String user() {
-        return user;
-    }
-
-    /**
-     * Gets a cached {@link FileSystem} or creates a new one.
-     *
-     * @return The secondary file system.
-     */
-    private FileSystem secondaryFileSystem() throws IOException {
-        assert factory != null;
-
-        return factory.get(user);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v2/package-info.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v2/package-info.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v2/package-info.java
deleted file mode 100644
index d8e70d1..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v2/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * <!-- Package description. -->
- * Contains Ignite Hadoop 2.x <code>FileSystem</code> implementation.
- */
-package org.apache.ignite.hadoop.fs.v2;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopClientProtocolProvider.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopClientProtocolProvider.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopClientProtocolProvider.java
deleted file mode 100644
index 583af35..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopClientProtocolProvider.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.hadoop.mapreduce;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.Collections;
-import java.util.concurrent.ConcurrentHashMap;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.MRConfig;
-import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
-import org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.client.GridClient;
-import org.apache.ignite.internal.client.GridClientConfiguration;
-import org.apache.ignite.internal.client.GridClientException;
-import org.apache.ignite.internal.client.GridClientFactory;
-import org.apache.ignite.internal.client.marshaller.jdk.GridClientJdkMarshaller;
-import org.apache.ignite.internal.processors.hadoop.proto.HadoopClientProtocol;
-import org.apache.ignite.internal.util.future.GridFutureAdapter;
-import org.apache.ignite.internal.util.typedef.F;
-
-import static org.apache.ignite.internal.client.GridClientProtocol.TCP;
-
-
-/**
- * Ignite Hadoop client protocol provider.
- */
-public class IgniteHadoopClientProtocolProvider extends ClientProtocolProvider {
-    /** Framework name used in configuration. */
-    public static final String FRAMEWORK_NAME = "ignite";
-
-    /** Clients. */
-    private static final ConcurrentHashMap<String, IgniteInternalFuture<GridClient>> cliMap = new ConcurrentHashMap<>();
-
-    /** {@inheritDoc} */
-    @Override public ClientProtocol create(Configuration conf) throws IOException {
-        if (FRAMEWORK_NAME.equals(conf.get(MRConfig.FRAMEWORK_NAME))) {
-            String addr = conf.get(MRConfig.MASTER_ADDRESS);
-
-            if (F.isEmpty(addr))
-                throw new IOException("Failed to create client protocol because server address is not specified (is " +
-                    MRConfig.MASTER_ADDRESS + " property set?).");
-
-            if (F.eq(addr, "local"))
-                throw new IOException("Local execution mode is not supported, please point " +
-                    MRConfig.MASTER_ADDRESS + " to real Ignite node.");
-
-            return createProtocol(addr, conf);
-        }
-
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public ClientProtocol create(InetSocketAddress addr, Configuration conf) throws IOException {
-        if (FRAMEWORK_NAME.equals(conf.get(MRConfig.FRAMEWORK_NAME)))
-            return createProtocol(addr.getHostString() + ":" + addr.getPort(), conf);
-
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void close(ClientProtocol cliProto) throws IOException {
-        // No-op.
-    }
-
-    /**
-     * Internal protocol creation routine.
-     *
-     * @param addr Address.
-     * @param conf Configuration.
-     * @return Client protocol.
-     * @throws IOException If failed.
-     */
-    private static ClientProtocol createProtocol(String addr, Configuration conf) throws IOException {
-        return new HadoopClientProtocol(conf, client(addr));
-    }
-
-    /**
-     * Create client.
-     *
-     * @param addr Endpoint address.
-     * @return Client.
-     * @throws IOException If failed.
-     */
-    private static GridClient client(String addr) throws IOException {
-        try {
-            IgniteInternalFuture<GridClient> fut = cliMap.get(addr);
-
-            if (fut == null) {
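-                // Only the thread that wins putIfAbsent creates the client; the rest wait on the shared future.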
-                GridFutureAdapter<GridClient> fut0 = new GridFutureAdapter<>();
-
-                IgniteInternalFuture<GridClient> oldFut = cliMap.putIfAbsent(addr, fut0);
-
-                if (oldFut != null)
-                    return oldFut.get();
-                else {
-                    GridClientConfiguration cliCfg = new GridClientConfiguration();
-
-                    cliCfg.setProtocol(TCP);
-                    cliCfg.setServers(Collections.singletonList(addr));
-                    cliCfg.setMarshaller(new GridClientJdkMarshaller());
-                    cliCfg.setMaxConnectionIdleTime(24 * 60 * 60 * 1000L); // 1 day.
-                    cliCfg.setDaemon(true);
-
-                    try {
-                        GridClient cli = GridClientFactory.start(cliCfg);
-
-                        fut0.onDone(cli);
-
-                        return cli;
-                    }
-                    catch (GridClientException e) {
-                        fut0.onDone(e);
-
-                        throw new IOException("Failed to establish connection with Ignite node: " + addr, e);
-                    }
-                }
-            }
-            else
-                return fut.get();
-        }
-        catch (IgniteCheckedException e) {
-            throw new IOException("Failed to establish connection with Ignite node: " + addr, e);
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopMapReducePlanner.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopMapReducePlanner.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopMapReducePlanner.java
index d4a44fa..e1101c5 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopMapReducePlanner.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopMapReducePlanner.java
@@ -17,16 +17,6 @@
 
 package org.apache.ignite.hadoop.mapreduce;
 
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.ListIterator;
-import java.util.Map;
-import java.util.UUID;
-
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.cluster.ClusterNode;
@@ -38,13 +28,23 @@ import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
 import org.apache.ignite.internal.processors.hadoop.HadoopJob;
 import org.apache.ignite.internal.processors.hadoop.HadoopMapReducePlan;
 import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsEndpoint;
-import org.apache.ignite.internal.processors.hadoop.planner.HadoopDefaultMapReducePlan;
 import org.apache.ignite.internal.processors.hadoop.planner.HadoopAbstractMapReducePlanner;
+import org.apache.ignite.internal.processors.hadoop.planner.HadoopDefaultMapReducePlan;
 import org.apache.ignite.internal.processors.igfs.IgfsEx;
 import org.apache.ignite.internal.util.typedef.F;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
 
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Map;
+import java.util.UUID;
+
 import static org.apache.ignite.IgniteFileSystem.IGFS_SCHEME;
 
 /**

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopWeightedMapReducePlanner.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopWeightedMapReducePlanner.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopWeightedMapReducePlanner.java
index 27ffc19..2d1ac0b 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopWeightedMapReducePlanner.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopWeightedMapReducePlanner.java
@@ -24,11 +24,11 @@ import org.apache.ignite.cluster.ClusterNode;
 import org.apache.ignite.igfs.IgfsBlockLocation;
 import org.apache.ignite.igfs.IgfsPath;
 import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.processors.hadoop.HadoopCommonUtils;
 import org.apache.ignite.internal.processors.hadoop.HadoopFileBlock;
 import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
 import org.apache.ignite.internal.processors.hadoop.HadoopJob;
 import org.apache.ignite.internal.processors.hadoop.HadoopMapReducePlan;
-import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
 import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsEndpoint;
 import org.apache.ignite.internal.processors.hadoop.planner.HadoopAbstractMapReducePlanner;
 import org.apache.ignite.internal.processors.hadoop.planner.HadoopDefaultMapReducePlan;
@@ -116,7 +116,7 @@ public class IgniteHadoopWeightedMapReducePlanner extends HadoopAbstractMapReduc
     /** {@inheritDoc} */
     @Override public HadoopMapReducePlan preparePlan(HadoopJob job, Collection<ClusterNode> nodes,
         @Nullable HadoopMapReducePlan oldPlan) throws IgniteCheckedException {
-        List<HadoopInputSplit> splits = HadoopUtils.sortInputSplits(job.input());
+        List<HadoopInputSplit> splits = HadoopCommonUtils.sortInputSplits(job.input());
         int reducerCnt = job.info().reducers();
 
         if (reducerCnt < 0)

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/package-info.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/package-info.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/package-info.java
deleted file mode 100644
index 7635b9e..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * <!-- Package description. -->
- * Ignite Hadoop Accelerator map-reduce classes.
- */
-package org.apache.ignite.hadoop.mapreduce;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/hadoop/util/UserNameMapper.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/util/UserNameMapper.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/util/UserNameMapper.java
index 26dc4b2..12669aa 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/util/UserNameMapper.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/util/UserNameMapper.java
@@ -17,14 +17,12 @@
 
 package org.apache.ignite.hadoop.util;
 
-import org.apache.ignite.hadoop.fs.HadoopFileSystemFactory;
 import org.jetbrains.annotations.Nullable;
 
 import java.io.Serializable;
 
 /**
- * Hadoop file system name mapper. Used by {@link HadoopFileSystemFactory} implementation to pass proper user names
- * to the underlying Hadoop file system.
+ * Hadoop file system name mapper. Ensures that the correct user name is passed to the underlying Hadoop file system.
  */
 public interface UserNameMapper extends Serializable {
     /**

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopAttributes.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopAttributes.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopAttributes.java
deleted file mode 100644
index 23eaa18..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopAttributes.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import org.apache.ignite.cluster.ClusterNode;
-import org.apache.ignite.configuration.HadoopConfiguration;
-import org.apache.ignite.internal.IgniteNodeAttributes;
-import org.apache.ignite.internal.util.tostring.GridToStringExclude;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.jetbrains.annotations.Nullable;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.Arrays;
-
-/**
- * Hadoop attributes.
- */
-public class HadoopAttributes implements Externalizable {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** Attribute name. */
-    public static final String NAME = IgniteNodeAttributes.ATTR_PREFIX + ".hadoop";
-
-    /** Map-reduce planner class name. */
-    private String plannerCls;
-
-    /** External executor flag. */
-    private boolean extExec;
-
-    /** Maximum parallel tasks. */
-    private int maxParallelTasks;
-
-    /** Maximum task queue size. */
-    private int maxTaskQueueSize;
-
-    /** Library names. */
-    @GridToStringExclude
-    private String[] libNames;
-
-    /** Number of cores. */
-    private int cores;
-
-    /**
-     * Get attributes for node (if any).
-     *
-     * @param node Node.
-     * @return Attributes or {@code null} if Hadoop Accelerator is not enabled for node.
-     */
-    @Nullable public static HadoopAttributes forNode(ClusterNode node) {
-        return node.attribute(NAME);
-    }
-
-    /**
-     * {@link Externalizable} support.
-     */
-    public HadoopAttributes() {
-        // No-op.
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param cfg Configuration.
-     */
-    public HadoopAttributes(HadoopConfiguration cfg) {
-        assert cfg != null;
-        assert cfg.getMapReducePlanner() != null;
-
-        plannerCls = cfg.getMapReducePlanner().getClass().getName();
-
-        // TODO: IGNITE-404: Get from configuration when fixed.
-        extExec = false;
-
-        maxParallelTasks = cfg.getMaxParallelTasks();
-        maxTaskQueueSize = cfg.getMaxTaskQueueSize();
-        libNames = cfg.getNativeLibraryNames();
-
-        // Cores count already passed in other attributes, we add it here for convenience.
-        cores = Runtime.getRuntime().availableProcessors();
-    }
-
-    /**
-     * @return Map reduce planner class name.
-     */
-    public String plannerClassName() {
-        return plannerCls;
-    }
-
-    /**
-     * @return External execution flag.
-     */
-    public boolean externalExecution() {
-        return extExec;
-    }
-
-    /**
-     * @return Maximum parallel tasks.
-     */
-    public int maxParallelTasks() {
-        return maxParallelTasks;
-    }
-
-    /**
-     * @return Maximum task queue size.
-     */
-    public int maxTaskQueueSize() {
-        return maxTaskQueueSize;
-    }
-
-    /**
-     * @return Native library names.
-     */
-    public String[] nativeLibraryNames() {
-        return libNames;
-    }
-
-    /**
-     * @return Number of cores on machine.
-     */
-    public int cores() {
-        return cores;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        out.writeObject(plannerCls);
-        out.writeBoolean(extExec);
-        out.writeInt(maxParallelTasks);
-        out.writeInt(maxTaskQueueSize);
-        out.writeObject(libNames);
-        out.writeInt(cores);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        plannerCls = (String)in.readObject();
-        extExec = in.readBoolean();
-        maxParallelTasks = in.readInt();
-        maxTaskQueueSize = in.readInt();
-        libNames = (String[])in.readObject();
-        cores = in.readInt();
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopAttributes.class, this, "libNames", Arrays.toString(libNames));
-    }
-}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopCommonUtils.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopCommonUtils.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopCommonUtils.java
new file mode 100644
index 0000000..83f94ce
--- /dev/null
+++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopCommonUtils.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.TreeSet;
+
+/**
+ * Common Hadoop utility methods that do not depend on the Hadoop API.
+ */
+public class HadoopCommonUtils {
+    /**
+     * Sort input splits by length, descending (longest first).
+     *
+     * @param splits Splits.
+     * @return Sorted splits.
+     */
+    public static List<HadoopInputSplit> sortInputSplits(Collection<HadoopInputSplit> splits) {
+        int id = 0;
+
+        TreeSet<SplitSortWrapper> sortedSplits = new TreeSet<>();
+
+        for (HadoopInputSplit split : splits) {
+            long len = split instanceof HadoopFileBlock ? ((HadoopFileBlock)split).length() : 0;
+
+            sortedSplits.add(new SplitSortWrapper(id++, split, len));
+        }
+
+        ArrayList<HadoopInputSplit> res = new ArrayList<>(sortedSplits.size());
+
+        for (SplitSortWrapper sortedSplit : sortedSplits)
+            res.add(sortedSplit.split);
+
+        return res;
+    }
+
+    /**
+     * Split wrapper for sorting.
+     */
+    private static class SplitSortWrapper implements Comparable<SplitSortWrapper> {
+        /** Unique ID. */
+        private final int id;
+
+        /** Split. */
+        private final HadoopInputSplit split;
+
+        /** Split length. */
+        private final long len;
+
+        /**
+         * Constructor.
+         *
+         * @param id Unique ID.
+         * @param split Split.
+         * @param len Split length.
+         */
+        public SplitSortWrapper(int id, HadoopInputSplit split, long len) {
+            this.id = id;
+            this.split = split;
+            this.len = len;
+        }
+
+        /** {@inheritDoc} */
+        @SuppressWarnings("NullableProblems")
+        @Override public int compareTo(SplitSortWrapper other) {
+            long res = len - other.len;
+
+            if (res > 0)
+                return -1;
+            else if (res < 0)
+                return 1;
+            else
+                return id - other.id;
+        }
+
+        /** {@inheritDoc} */
+        @Override public int hashCode() {
+            return id;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean equals(Object obj) {
+            return obj instanceof SplitSortWrapper && id == ((SplitSortWrapper)obj).id;
+        }
+    }
+
+    /**
+     * Private constructor.
+     */
+    private HadoopCommonUtils() {
+        // No-op.
+    }
+}

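The ordering above is worth spelling out: SplitSortWrapper sorts descending by length and breaks ties with a unique insertion id, which keeps the result deterministic and prevents the TreeSet from collapsing equal-length splits as duplicates (compareTo() never returns 0 for distinct wrappers). A self-contained sketch of the same comparator semantics, with hypothetical names:

    import java.util.TreeSet;

    class SplitOrderDemo {
        static final class Item implements Comparable<Item> {
            final int id;
            final long len;

            Item(int id, long len) { this.id = id; this.len = len; }

            /** Descending by length; unique id as tie-breaker, so no two distinct items compare equal. */
            @Override public int compareTo(Item o) {
                long d = len - o.len;

                return d > 0 ? -1 : d < 0 ? 1 : Integer.compare(id, o.id);
            }

            @Override public String toString() { return "id=" + id + " len=" + len; }
        }

        public static void main(String[] args) {
            TreeSet<Item> set = new TreeSet<>();

            set.add(new Item(0, 10));
            set.add(new Item(1, 30));
            set.add(new Item(2, 10));

            // Prints: [id=1 len=30, id=0 len=10, id=2 len=10]
            System.out.println(set);
        }
    }
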
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopComponent.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopComponent.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopComponent.java
deleted file mode 100644
index aeda5c0..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopComponent.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteLogger;
-
-/**
- * Abstract class for all Hadoop components.
- */
-public abstract class HadoopComponent {
-    /** Hadoop context. */
-    protected HadoopContext ctx;
-
-    /** Logger. */
-    protected IgniteLogger log;
-
-    /**
-     * @param ctx Hadoop context.
-     */
-    public void start(HadoopContext ctx) throws IgniteCheckedException {
-        this.ctx = ctx;
-
-        log = ctx.kernalContext().log(getClass());
-    }
-
-    /**
-     * Stops manager.
-     */
-    public void stop(boolean cancel) {
-        // No-op.
-    }
-
-    /**
-     * Callback invoked when all grid components are started.
-     */
-    public void onKernalStart() throws IgniteCheckedException {
-        // No-op.
-    }
-
-    /**
-     * Callback invoked before all grid components are stopped.
-     */
-    public void onKernalStop(boolean cancel) {
-        // No-op.
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopContext.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopContext.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopContext.java
deleted file mode 100644
index 42a3d72..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopContext.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.UUID;
-import org.apache.ignite.cluster.ClusterNode;
-import org.apache.ignite.configuration.HadoopConfiguration;
-import org.apache.ignite.internal.GridKernalContext;
-import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
-import org.apache.ignite.internal.processors.hadoop.jobtracker.HadoopJobMetadata;
-import org.apache.ignite.internal.processors.hadoop.jobtracker.HadoopJobTracker;
-import org.apache.ignite.internal.processors.hadoop.shuffle.HadoopShuffle;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskExecutorAdapter;
-import org.apache.ignite.internal.util.typedef.internal.CU;
-
-/**
- * Hadoop accelerator context.
- */
-public class HadoopContext {
-    /** Kernal context. */
-    private GridKernalContext ctx;
-
-    /** Hadoop configuration. */
-    private HadoopConfiguration cfg;
-
-    /** Job tracker. */
-    private HadoopJobTracker jobTracker;
-
-    /** External task executor. */
-    private HadoopTaskExecutorAdapter taskExecutor;
-
-    /** */
-    private HadoopShuffle shuffle;
-
-    /** Managers list. */
-    private List<HadoopComponent> components = new ArrayList<>();
-
-    /**
-     * @param ctx Kernal context.
-     */
-    public HadoopContext(
-        GridKernalContext ctx,
-        HadoopConfiguration cfg,
-        HadoopJobTracker jobTracker,
-        HadoopTaskExecutorAdapter taskExecutor,
-        HadoopShuffle shuffle
-    ) {
-        this.ctx = ctx;
-        this.cfg = cfg;
-
-        this.jobTracker = add(jobTracker);
-        this.taskExecutor = add(taskExecutor);
-        this.shuffle = add(shuffle);
-    }
-
-    /**
-     * Gets list of managers.
-     *
-     * @return List of managers.
-     */
-    public List<HadoopComponent> components() {
-        return components;
-    }
-
-    /**
-     * Gets kernal context.
-     *
-     * @return Grid kernal context instance.
-     */
-    public GridKernalContext kernalContext() {
-        return ctx;
-    }
-
-    /**
-     * Gets Hadoop configuration.
-     *
-     * @return Hadoop configuration.
-     */
-    public HadoopConfiguration configuration() {
-        return cfg;
-    }
-
-    /**
-     * Gets local node ID. Shortcut for {@code kernalContext().localNodeId()}.
-     *
-     * @return Local node ID.
-     */
-    public UUID localNodeId() {
-        return ctx.localNodeId();
-    }
-
-    /**
-     * Gets local node order.
-     *
-     * @return Local node order.
-     */
-    public long localNodeOrder() {
-        assert ctx.discovery() != null;
-
-        return ctx.discovery().localNode().order();
-    }
-
-    /**
-     * @return Hadoop-enabled nodes.
-     */
-    public Collection<ClusterNode> nodes() {
-        return ctx.discovery().cacheNodes(CU.SYS_CACHE_HADOOP_MR, ctx.discovery().topologyVersionEx());
-    }
-
-    /**
-     * @return {@code True} if the local node has the minimum order among Hadoop-enabled nodes, i.e. is the job update leader.
-     */
-    public boolean jobUpdateLeader() {
-        long minOrder = Long.MAX_VALUE;
-        ClusterNode minOrderNode = null;
-
-        for (ClusterNode node : nodes()) {
-            if (node.order() < minOrder) {
-                minOrder = node.order();
-                minOrderNode = node;
-            }
-        }
-
-        assert minOrderNode != null;
-
-        return localNodeId().equals(minOrderNode.id());
-    }
-
-    /**
-     * @param meta Job metadata.
-     * @return {@code true} If local node is participating in job execution.
-     */
-    public boolean isParticipating(HadoopJobMetadata meta) {
-        UUID locNodeId = localNodeId();
-
-        if (locNodeId.equals(meta.submitNodeId()))
-            return true;
-
-        HadoopMapReducePlan plan = meta.mapReducePlan();
-
-        return plan.mapperNodeIds().contains(locNodeId) || plan.reducerNodeIds().contains(locNodeId) || jobUpdateLeader();
-    }
-
-    /**
-     * @return Job tracker instance.
-     */
-    public HadoopJobTracker jobTracker() {
-        return jobTracker;
-    }
-
-    /**
-     * @return Task executor.
-     */
-    public HadoopTaskExecutorAdapter taskExecutor() {
-        return taskExecutor;
-    }
-
-    /**
-     * @return Shuffle.
-     */
-    public HadoopShuffle shuffle() {
-        return shuffle;
-    }
-
-    /**
-     * @return Map-reduce planner.
-     */
-    public HadoopMapReducePlanner planner() {
-        return cfg.getMapReducePlanner();
-    }
-
-    /**
-     * Adds component.
-     *
-     * @param c Component to add.
-     * @return Added manager.
-     */
-    private <C extends HadoopComponent> C add(C c) {
-        components.add(c);
-
-        return c;
-    }
-}
\ No newline at end of file

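For context on the deleted jobUpdateLeader() above: it elects the node with the smallest topology order as coordinator, so every node runs the same scan and exactly one answers true. A standalone sketch of that min-order scan (Node here is a simplified stand-in for ClusterNode, not the Ignite type):

    import java.util.Collection;
    import java.util.UUID;

    class LeaderSketch {
        static final class Node {
            final UUID id;
            final long order;

            Node(UUID id, long order) { this.id = id; this.order = order; }
        }

        /** True when locId belongs to the node with the minimum order, i.e. the leader. */
        static boolean isLeader(UUID locId, Collection<Node> nodes) {
            Node min = null;

            for (Node n : nodes) {
                if (min == null || n.order < min.order)
                    min = n;
            }

            return min != null && locId.equals(min.id);
        }
    }
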
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultJobInfo.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultJobInfo.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultJobInfo.java
deleted file mode 100644
index 1382c1f..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultJobInfo.java
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.lang.reflect.Constructor;
-import java.util.HashMap;
-import java.util.Map;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteLogger;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Hadoop job info based on default Hadoop configuration.
- */
-public class HadoopDefaultJobInfo implements HadoopJobInfo, Externalizable {
-    /** */
-    private static final long serialVersionUID = 5489900236464999951L;
-
-    /** {@code true} if the job has a combiner. */
-    private boolean hasCombiner;
-
-    /** Number of reducers configured for job. */
-    private int numReduces;
-
-    /** Configuration. */
-    private Map<String,String> props = new HashMap<>();
-
-    /** Job name. */
-    private String jobName;
-
-    /** User name. */
-    private String user;
-
-    /**
-     * Default constructor required by {@link Externalizable}.
-     */
-    public HadoopDefaultJobInfo() {
-        // No-op.
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param jobName Job name.
-     * @param user User name.
-     * @param hasCombiner {@code true} if the job has a combiner.
-     * @param numReduces Number of reducers configured for job.
-     * @param props All other properties of the job.
-     */
-    public HadoopDefaultJobInfo(String jobName, String user, boolean hasCombiner, int numReduces,
-        Map<String, String> props) {
-        this.jobName = jobName;
-        this.user = user;
-        this.hasCombiner = hasCombiner;
-        this.numReduces = numReduces;
-        this.props = props;
-    }
-
-    /** {@inheritDoc} */
-    @Nullable @Override public String property(String name) {
-        return props.get(name);
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopJob createJob(Class<? extends HadoopJob> jobCls, HadoopJobId jobId, IgniteLogger log,
-        @Nullable String[] libNames) throws IgniteCheckedException {
-        assert jobCls != null;
-
-        try {
-            Constructor<? extends HadoopJob> constructor = jobCls.getConstructor(HadoopJobId.class,
-                HadoopDefaultJobInfo.class, IgniteLogger.class, String[].class);
-
-            return constructor.newInstance(jobId, this, log, libNames);
-        }
-        catch (Throwable t) {
-            if (t instanceof Error)
-                throw (Error)t;
-            
-            throw new IgniteCheckedException(t);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean hasCombiner() {
-        return hasCombiner;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean hasReducer() {
-        return reducers() > 0;
-    }
-
-    /** {@inheritDoc} */
-    @Override public int reducers() {
-        return numReduces;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String jobName() {
-        return jobName;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String user() {
-        return user;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        U.writeString(out, jobName);
-        U.writeString(out, user);
-
-        out.writeBoolean(hasCombiner);
-        out.writeInt(numReduces);
-
-        U.writeStringMap(out, props);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        jobName = U.readString(in);
-        user = U.readString(in);
-
-        hasCombiner = in.readBoolean();
-        numReduces = in.readInt();
-
-        props = U.readStringMap(in);
-    }
-
-    /**
-     * @return Properties of the job.
-     */
-    public Map<String, String> properties() {
-        return props;
-    }
-}
\ No newline at end of file

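The deleted createJob() above relies on a convention: the concrete job class must expose a (HadoopJobId, HadoopDefaultJobInfo, IgniteLogger, String[]) constructor, which is looked up and invoked reflectively, with Errors rethrown rather than wrapped. A generic sketch of the same pattern with placeholder parameter types (this is not the Ignite API):

    import java.lang.reflect.Constructor;

    class ReflectiveFactory {
        /** Instantiates cls via its (String, int) constructor; Errors are rethrown, everything else is wrapped. */
        static <T> T create(Class<? extends T> cls, String name, int reducers) throws Exception {
            try {
                Constructor<? extends T> ctor = cls.getConstructor(String.class, int.class);

                return ctor.newInstance(name, reducers);
            }
            catch (Throwable t) {
                if (t instanceof Error)
                    throw (Error)t; // Never swallow JVM errors such as OutOfMemoryError.

                throw new Exception("Failed to instantiate " + cls.getName(), t);
            }
        }
    }
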
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopImpl.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopImpl.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopImpl.java
deleted file mode 100644
index ed2657e..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopImpl.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.configuration.HadoopConfiguration;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
-import org.apache.ignite.internal.util.GridSpinBusyLock;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Hadoop facade implementation.
- */
-public class HadoopImpl implements Hadoop {
-    /** Hadoop processor. */
-    private final HadoopProcessor proc;
-
-    /** Busy lock. */
-    private final GridSpinBusyLock busyLock = new GridSpinBusyLock();
-
-    /**
-     * Constructor.
-     *
-     * @param proc Hadoop processor.
-     */
-    HadoopImpl(HadoopProcessor proc) {
-        this.proc = proc;
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopConfiguration configuration() {
-        return proc.config();
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopJobId nextJobId() {
-        if (busyLock.enterBusy()) {
-            try {
-                return proc.nextJobId();
-            }
-            finally {
-                busyLock.leaveBusy();
-            }
-        }
-        else
-            throw new IllegalStateException("Failed to get next job ID (grid is stopping).");
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgniteInternalFuture<?> submit(HadoopJobId jobId, HadoopJobInfo jobInfo) {
-        if (busyLock.enterBusy()) {
-            try {
-                return proc.submit(jobId, jobInfo);
-            }
-            finally {
-                busyLock.leaveBusy();
-            }
-        }
-        else
-            throw new IllegalStateException("Failed to submit job (grid is stopping).");
-    }
-
-    /** {@inheritDoc} */
-    @Nullable @Override public HadoopJobStatus status(HadoopJobId jobId) throws IgniteCheckedException {
-        if (busyLock.enterBusy()) {
-            try {
-                return proc.status(jobId);
-            }
-            finally {
-                busyLock.leaveBusy();
-            }
-        }
-        else
-            throw new IllegalStateException("Failed to get job status (grid is stopping).");
-    }
-
-    /** {@inheritDoc} */
-    @Nullable @Override public HadoopCounters counters(HadoopJobId jobId) throws IgniteCheckedException {
-        if (busyLock.enterBusy()) {
-            try {
-                return proc.counters(jobId);
-            }
-            finally {
-                busyLock.leaveBusy();
-            }
-        }
-        else
-            throw new IllegalStateException("Failed to get job counters (grid is stopping).");
-    }
-
-    /** {@inheritDoc} */
-    @Nullable @Override public IgniteInternalFuture<?> finishFuture(HadoopJobId jobId) throws IgniteCheckedException {
-        if (busyLock.enterBusy()) {
-            try {
-                return proc.finishFuture(jobId);
-            }
-            finally {
-                busyLock.leaveBusy();
-            }
-        }
-        else
-            throw new IllegalStateException("Failed to get job finish future (grid is stopping).");
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean kill(HadoopJobId jobId) throws IgniteCheckedException {
-        if (busyLock.enterBusy()) {
-            try {
-                return proc.kill(jobId);
-            }
-            finally {
-                busyLock.leaveBusy();
-            }
-        }
-        else
-            throw new IllegalStateException("Failed to kill job (grid is stopping).");
-    }
-}
\ No newline at end of file

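Every facade method in the deleted class above repeats one gate idiom: enter the busy lock, delegate, leave the lock in finally, and throw IllegalStateException once the grid is stopping. A compact sketch of that idiom using ReentrantReadWriteLock as a stand-in for GridSpinBusyLock (hypothetical class, not Ignite code):

    import java.util.concurrent.locks.ReentrantReadWriteLock;
    import java.util.function.Supplier;

    class BusyGate {
        private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

        private volatile boolean stopping;

        /** Runs op under the read lock, or fails fast once stop() has been requested. */
        <T> T guard(Supplier<T> op, String errMsg) {
            // New callers are rejected by the flag; tryLock avoids blocking behind the stopping writer.
            if (!stopping && lock.readLock().tryLock()) {
                try {
                    return op.get();
                }
                finally {
                    lock.readLock().unlock();
                }
            }

            throw new IllegalStateException(errMsg);
        }

        /** Marks the gate as stopping, then waits for in-flight operations to drain. */
        void stop() {
            stopping = true;

            lock.writeLock().lock();
            lock.writeLock().unlock();
        }
    }

A call site then collapses to a single line, e.g. gate.guard(proc::nextJobId, "Failed to get next job ID (grid is stopping)."), which is the shape each method above writes out by hand.
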
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounterGroup.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounterGroup.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounterGroup.java
deleted file mode 100644
index 4e03e17..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounterGroup.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.Iterator;
-import org.apache.hadoop.mapreduce.Counter;
-import org.apache.hadoop.mapreduce.CounterGroup;
-import org.apache.hadoop.mapreduce.counters.CounterGroupBase;
-
-/**
- * Hadoop counter group adapter.
- */
-class HadoopMapReduceCounterGroup implements CounterGroup {
-    /** Counters. */
-    private final HadoopMapReduceCounters cntrs;
-
-    /** Group name. */
-    private final String name;
-
-    /**
-     * Creates new instance.
-     *
-     * @param cntrs Client counters instance.
-     * @param name Group name.
-     */
-    HadoopMapReduceCounterGroup(HadoopMapReduceCounters cntrs, String name) {
-        this.cntrs = cntrs;
-        this.name = name;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getName() {
-        return name;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getDisplayName() {
-        return name;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void setDisplayName(String displayName) {
-        // No-op.
-    }
-
-    /** {@inheritDoc} */
-    @Override public void addCounter(Counter counter) {
-        addCounter(counter.getName(), counter.getDisplayName(), 0);
-    }
-
-    /** {@inheritDoc} */
-    @Override public Counter addCounter(String name, String displayName, long value) {
-        final Counter counter = cntrs.findCounter(this.name, name);
-
-        counter.setValue(value);
-
-        return counter;
-    }
-
-    /** {@inheritDoc} */
-    @Override public Counter findCounter(String counterName, String displayName) {
-        return cntrs.findCounter(name, counterName);
-    }
-
-    /** {@inheritDoc} */
-    @Override public Counter findCounter(String counterName, boolean create) {
-        return cntrs.findCounter(name, counterName, create);
-    }
-
-    /** {@inheritDoc} */
-    @Override public Counter findCounter(String counterName) {
-        return cntrs.findCounter(name, counterName);
-    }
-
-    /** {@inheritDoc} */
-    @Override public int size() {
-        return cntrs.groupSize(name);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void incrAllCounters(CounterGroupBase<Counter> rightGroup) {
-        for (final Counter counter : rightGroup)
-            cntrs.findCounter(name, counter.getName()).increment(counter.getValue());
-    }
-
-    /** {@inheritDoc} */
-    @Override public CounterGroupBase<Counter> getUnderlyingGroup() {
-        return this;
-    }
-
-    /** {@inheritDoc} */
-    @Override public Iterator<Counter> iterator() {
-        return cntrs.iterateGroup(name);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void write(DataOutput out) throws IOException {
-        throw new UnsupportedOperationException("not implemented");
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readFields(DataInput in) throws IOException {
-        throw new UnsupportedOperationException("not implemented");
-    }
-}
\ No newline at end of file

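Note the shape of the deleted adapter above: the group holds no counter state of its own; every lookup and update delegates to the shared HadoopMapReduceCounters instance under a (group name, counter name) composite key. A minimal sketch of that delegation shape (hypothetical registry, not the Hadoop CounterGroup API):

    import java.util.HashMap;
    import java.util.Map;

    class CountersRegistry {
        private final Map<String, Long> vals = new HashMap<>();

        /** Counters live here, addressed by a composite (group, name) key; groups hold no state. */
        long increment(String grp, String name, long delta) {
            String key = grp + '\0' + name;

            long v = vals.getOrDefault(key, 0L) + delta;

            vals.put(key, v);

            return v;
        }
    }

    class GroupView {
        private final CountersRegistry registry;
        private final String name;

        GroupView(CountersRegistry registry, String name) {
            this.registry = registry;
            this.name = name;
        }

        /** Pure delegation, as in the adapter above. */
        long increment(String counter, long delta) {
            return registry.increment(name, counter, delta);
        }
    }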

[27/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/books/art-of-war.txt
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/books/art-of-war.txt b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/books/art-of-war.txt
new file mode 100644
index 0000000..8efd211
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/books/art-of-war.txt
@@ -0,0 +1,6982 @@
+The Project Gutenberg eBook, The Art of War, by Sun Tzu
+
+
+This eBook is for the use of anyone anywhere at no cost and with
+almost no restrictions whatsoever.  You may copy it, give it away or
+re-use it under the terms of the Project Gutenberg License included
+with this eBook or online at www.gutenberg.org
+
+
+Title: The Art of War
+
+Author: Sun Tzu
+
+Translator: Lionel Giles
+
+Release Date: May 1994  [eBook #132]
+[Last updated: January 14, 2012]
+
+Language: English
+
+Character set encoding: ISO-646-US (US-ASCII)
+
+***START OF THE PROJECT GUTENBERG EBOOK THE ART OF WAR ***
+
+Note: Please see Project Gutenberg's eBook #17405 for a version of
+this eBook without the Giles commentary (that is, with only the
+Sun Tzu text).
+
+
+
+                    SUN TZU ON THE ART OF WAR
+
+            THE OLDEST MILITARY TREATISE IN THE WORLD
+
+          Translated from the Chinese with Introduction
+                       and Critical Notes
+
+                               BY
+
+                       LIONEL GILES, M.A.
+
+ Assistant in the Department of Oriental Printed Books and MSS.
+                      in the British Museum
+
+                     First Published in 1910
+
+-----------------------------------------------------------------
+
+                          To my brother
+                  Captain Valentine Giles, R.G.
+                        in the hope that
+                      a work 2400 years old
+           may yet contain lessons worth consideration
+                     by the soldier of today
+                        this translation
+                  is affectionately dedicated.
+
+-----------------------------------------------------------------
+
+Preface to the Project Gutenberg Etext
+--------------------------------------
+
+     When Lionel Giles began his translation of Sun Tzu's ART OF
+WAR, the work was virtually unknown in Europe.  Its introduction
+to Europe began in 1782 when a French Jesuit Father living in
+China, Joseph Amiot, acquired a copy of it, and translated it
+into French.  It was not a good translation because, according to
+Dr. Giles, "[I]t contains a great deal that Sun Tzu did not
+write, and very little indeed of what he did."
+     The first translation into English was published in 1905 in
+Tokyo by Capt. E. F. Calthrop, R.F.A.  However, this translation
+is, in the words of Dr. Giles, "excessively bad."  He goes
+further in this criticism:  "It is not merely a question of
+downright blunders, from which none can hope to be wholly exempt.
+Omissions were frequent; hard passages were willfully distorted
+or slurred over.  Such offenses are less pardonable.  They would
+not be tolerated in any edition of a Latin or Greek classic, and
+a similar standard of honesty ought to be insisted upon in
+translations from Chinese."  In 1908 a new edition of Capt.
+Calthrop's translation was published in London.  It was an
+improvement on the first -- omissions filled up and numerous
+mistakes corrected -- but new errors were created in the process.
+Dr. Giles, in justifying his translation, wrote:  "It was not
+undertaken out of any inflated estimate of my own powers; but I
+could not help feeling that Sun Tzu deserved a better fate than
+had befallen him, and I knew that, at any rate, I could hardly
+fail to improve on the work of my predecessors."
+     Clearly, Dr. Giles' work established much of the groundwork
+for the work of later translators who published their own
+editions.  Of the later editions of the ART OF WAR I have
+examined;  two feature Giles' edited translation and notes,  the
+other two present the same basic information from the ancient
+Chinese commentators found in the Giles edition.  Of these four,
+Giles' 1910 edition is the most scholarly and presents the reader
+an incredible amount of information concerning Sun Tzu's text,
+much more than any other translation.
+     The Giles' edition of the ART OF WAR, as stated above, was a
+scholarly work.  Dr. Giles was a leading sinologue at the time
+and an assistant in the Department of Oriental Printed Books and
+Manuscripts in the British Museum.  Apparently he wanted to
+produce a definitive edition, superior to anything else that
+existed and perhaps something that would become a standard
+translation.  It was the best translation available for 50 years.
+But apparently there was not much interest in Sun Tzu in English-
+speaking countries since it took the start of the Second
+World War to renew interest in his work.  Several people
+published unsatisfactory English translations of Sun Tzu.  In
+1944,  Dr. Giles' translation was edited and published in the
+United States in a series of military science books.  But it
+wasn't until 1963 that a good English translation (by Samuel B.
+Griffith and still in print) was published that was an equal to
+Giles' translation.  While this translation is more lucid than
+Dr. Giles' translation, it lacks his copious notes that make his
+so interesting.
+     Dr. Giles produced a work primarily intended for scholars of
+the Chinese civilization and language.  It contains the Chinese
+text of Sun Tzu, the English translation, and voluminous notes
+along with numerous footnotes.  Unfortunately, some of his notes
+and footnotes contain Chinese characters; some are completely
+Chinese.  Thus,  a conversion to a Latin alphabet etext was
+difficult.  I did the conversion in complete ignorance of Chinese
+(except for what I learned while doing the conversion).  Thus, I
+faced the difficult task of paraphrasing it while retaining as
+much of the important text as I could.  Every paraphrase
+represents a loss; thus I did what I could to retain as much of
+the text as possible.  Because the 1910 text contains a Chinese
+concordance, I was able to transliterate proper names, books, and
+the like at the risk of making the text more obscure.  However,
+the text, on the whole, is quite satisfactory for the casual
+reader, a transformation made possible by conversion to an etext.
+However, I come away from this task with the feeling of loss
+because I know that someone with a background in Chinese can do a
+better job than I did; any such attempt would be welcomed.
+
+                              Bob Sutton
+                              al876@cleveland.freenet.edu
+                              bobs@gnu.ai.mit.edu
+
+-----------------------------------------------------------------
+INTRODUCTION
+
+
+Sun Wu and his Book
+-------------------
+
+
+     Ssu-ma Ch`ien gives the following biography of Sun Tzu:  [1]
+--
+
+       Sun Tzu Wu was a native of the Ch`i State.  His ART OF
+  WAR brought him to the notice of Ho Lu, [2] King of Wu.  Ho
+  Lu said to him:  "I have carefully perused your 13 chapters.
+  May I submit your theory of managing soldiers to a slight
+  test?"
+       Sun Tzu replied:  "You may."
+       Ho Lu asked:  "May the test be applied to women?"
+       The answer was again in the affirmative, so arrangements
+  were made to bring 180 ladies out of the Palace.  Sun Tzu
+  divided them into two companies, and placed one of the King's
+  favorite concubines at the head of each.  He then bade them
+  all take spears in their hands, and addressed them thus:   "I
+  presume you know the difference between front and back, right
+  hand and left hand?"
+       The girls replied:  Yes.
+       Sun Tzu went on:  "When I say "Eyes front,"  you must
+  look straight ahead.  When I say "Left turn," you must face
+  towards your left hand.  When I say "Right turn,"  you must
+  face towards your right hand.  When I say "About turn,"  you
+  must face right round towards your back."
+       Again the girls assented.  The words of command having
+  been thus explained, he set up the halberds and battle-axes
+  in order to begin the drill.  Then, to the sound of drums, he
+  gave the order "Right turn."  But the girls only burst out
+  laughing.  Sun Tzu said:  "If words of command are not clear
+  and distinct, if orders are not thoroughly understood, then
+  the general is to blame."
+       So he started drilling them again, and this time gave
+  the order "Left turn," whereupon the girls once more burst
+  into fits of laughter.  Sun Tzu:  "If words of command are
+  not clear and distinct, if orders are not thoroughly
+  understood, the general is to blame.  But if his orders ARE
+  clear, and the soldiers nevertheless disobey, then it is the
+  fault of their officers."
+       So saying, he ordered the leaders of the two companies
+  to be beheaded.  Now the king of Wu was watching the scene
+  from the top of a raised pavilion; and when he saw that his
+  favorite concubines were about to be executed, he was greatly
+  alarmed and hurriedly sent down the following message:   "We
+  are now quite satisfied as to our general's ability to handle
+  troops.  If We are bereft of these two concubines, our meat
+  and drink will lose their savor.  It is our wish that they
+  shall not be beheaded."
+       Sun Tzu replied:  "Having once received His Majesty's
+  commission to be the general of his forces, there are certain
+  commands of His Majesty which, acting in that capacity, I am
+  unable to accept."
+       Accordingly,  he had the two leaders beheaded,  and
+  straightway installed the pair next in order as leaders in
+  their place.  When this had been done, the drum was sounded
+  for the drill once more; and the girls went through all the
+  evolutions, turning to the right or to the left, marching
+  ahead or wheeling back, kneeling or standing, with perfect
+  accuracy and precision, not venturing to utter a sound.  Then
+  Sun Tzu sent a messenger to the King saying:  "Your soldiers,
+  Sire, are now properly drilled and disciplined, and ready for
+  your majesty's inspection.  They can be put to any use that
+  their sovereign may desire; bid them go through fire and
+  water, and they will not disobey."
+       But the King replied:  "Let our general cease drilling
+  and return to camp.  As for us, We have no wish to come down
+  and inspect the troops."
+       Thereupon Sun Tzu said:  "The King is only fond of
+  words, and cannot translate them into deeds."
+       After that, Ho Lu saw that Sun Tzu was one who knew how
+  to handle an army, and finally appointed him general.  In the
+  west, he defeated the Ch`u State and forced his way into
+  Ying, the capital; to the north he put fear into the States
+  of Ch`i and Chin, and spread his fame abroad amongst the
+  feudal princes.  And Sun Tzu shared in the might of the King.
+
+     About Sun Tzu himself this is all that Ssu-ma Ch`ien has to
+tell us in this chapter.  But he proceeds to give a biography of
+his descendant,  Sun Pin, born about a hundred years after his
+famous ancestor's death, and also the outstanding military genius
+of his time.  The historian speaks of him too as Sun Tzu, and in
+his preface we read:  "Sun Tzu had his feet cut off and yet
+continued to discuss the art of war." [3]  It seems likely, then,
+that  "Pin" was a nickname bestowed on him after his mutilation,
+unless the story was invented in order to account for the name.
+The crowning incident of his career, the crushing defeat of his
+treacherous rival P`ang Chuan, will be found briefly related in
+Chapter V. ss. 19, note.
+     To return to the elder Sun Tzu.  He is mentioned in two
+other passages of the SHIH CHI: --
+
+       In the third year of his reign [512 B.C.] Ho Lu, king of
+  Wu, took the field with Tzu-hsu [i.e. Wu Yuan] and Po P`ei,
+  and attacked Ch`u.  He captured the town of Shu and slew the
+  two prince's sons who had formerly been generals of Wu.  He
+  was then meditating a descent on Ying [the capital]; but the
+  general Sun Wu said:  "The army is exhausted.  It is not yet
+  possible.  We must wait"....  [After further successful
+  fighting,]  "in the ninth year  [506 B.C.],  King Ho Lu
+  addressed Wu Tzu-hsu and Sun Wu, saying:   "Formerly, you
+  declared that it was not yet possible for us to enter Ying.
+  Is the time ripe now?"  The two men replied:  "Ch`u's general
+  Tzu-ch`ang, [4] is grasping and covetous, and the princes of
+  T`ang and Ts`ai both have a grudge against him.  If Your
+  Majesty has resolved to make a grand attack, you must win
+  over T`ang and Ts`ai, and then you may succeed."   Ho Lu
+  followed this advice, [beat Ch`u in five pitched battles and
+  marched into Ying.] [5]
+
+     This is the latest date at which anything is recorded of Sun
+Wu.  He does not appear to have survived his patron, who died
+from the effects of a wound in 496.
+     In another chapter there occurs this passage:  [6]
+
+       From this time onward, a number of famous soldiers
+  arose, one after the other:  Kao-fan, [7] who was employed by
+  the Chin State; Wang-tzu, [8] in the service of Ch`i; and Sun
+  Wu, in the service of Wu.  These men developed and threw
+  light upon the principles of war.
+
+     It is obvious enough that Ssu-ma Ch`ien at least had no
+doubt about the reality of Sun Wu as an historical personage; and
+with one exception, to be noticed presently, he is by far the
+most important authority on the period in question.  It will not
+be necessary, therefore, to say much of such a work as the WU
+YUEH CH`UN CH`IU, which is supposed to have been written by Chao
+Yeh of the 1st century A.D.  The attribution is somewhat
+doubtful; but even if it were otherwise, his account would be of
+little value, based as it is on the SHIH CHI and expanded with
+romantic details.  The story of Sun Tzu will be found, for what
+it is worth, in chapter 2.  The only new points in it worth
+noting are:  (1)  Sun Tzu was first recommended to Ho Lu by Wu
+Tzu-hsu.  (2) He is called a native of Wu.  (3) He had previously
+lived a retired life, and his contemporaries were unaware of his
+ability.
+     The following passage occurs in the Huai-nan Tzu:   "When
+sovereign and ministers show perversity of mind, it is impossible
+even for a Sun Tzu to encounter the foe."  Assuming that this
+work is genuine (and hitherto no doubt has been cast upon it), we
+have here the earliest direct reference for Sun Tzu, for Huai-nan
+Tzu died in 122 B.C., many years before the SHIH CHI was given to
+the world.
+     Liu Hsiang (80-9 B.C.) says:  "The reason why Sun Tzu at the
+head of 30,000 men beat Ch`u with 200,000 is that the latter were
+undisciplined."
+     Teng Ming-shih informs us that the surname "Sun" was
+bestowed on Sun Wu's grandfather by Duke Ching of Ch`i [547-490
+B.C.].  Sun Wu's father Sun P`ing, rose to be a Minister of State
+in Ch`i, and Sun Wu himself, whose style was Ch`ang-ch`ing,  fled
+to Wu on account of the rebellion which was being fomented by the
+kindred of T`ien Pao.  He had three sons, of whom the second,
+named Ming, was the father of Sun Pin.  According to this account
+then, Pin was the grandson of Wu, which, considering that Sun
+Pin's victory over Wei was gained in 341 B.C., may be dismissed
+as chronological impossible.  Whence these data were obtained by
+Teng Ming-shih I do not know, but of course no reliance whatever
+can be placed in them.
+     An interesting document which has survived from the close of
+the Han period is the short preface written by the Great Ts`ao
+Ts`ao, or Wei Wu Ti, for his edition of Sun Tzu.  I shall give it
+in full:  --
+
+       I have heard that the ancients used bows and arrows to
+  their advantage. [10]  The SHU CHU mentions "the army" among
+  the "eight objects of government."  The I CHING says:
+  "'army' indicates firmness and justice;  the experienced
+  leader will have good fortune."  The SHIH CHING says:  "The
+  King rose majestic in his wrath, and he marshaled his
+  troops."  The Yellow Emperor, T`ang the Completer and Wu Wang
+  all used spears and battle-axes in order to succor their
+  generation.  The SSU-MA FA says:  "If one man slay another of
+  set purpose, he himself may rightfully be slain."  He who
+  relies solely on warlike measures shall be exterminated; he
+  who relies solely on peaceful measures shall perish.
+  Instances of this are Fu Ch`ai [11] on the one hand and Yen
+  Wang on the other. [12]  In military matters, the Sage's rule
+  is normally to keep the peace, and to move his forces only
+  when occasion requires.  He will not use armed force unless
+  driven to it by necessity.
+       Many books have I read on the subject of war and
+  fighting; but the work composed by Sun Wu is the profoundest
+  of them all.  [Sun Tzu was a native of the Ch`i state,  his
+  personal name was Wu.  He wrote the ART OF WAR in 13 chapters
+  for Ho Lu, King of Wu.  Its principles were tested on women,
+  and he was subsequently made a general.  He led an army
+  westwards,  crushed the Ch`u state and entered Ying the
+  capital.  In the north, he kept Ch`i and Chin in awe.  A
+  hundred years and more after his time, Sun Pin lived. He was
+  a descendant of Wu.] [13]  In his treatment of deliberation
+  and planning, the importance of rapidity in taking the field,
+  [14] clearness of conception, and depth of design,  Sun Tzu
+  stands beyond the reach of carping criticism.  My
+  contemporaries, however, have failed to grasp the full
+  meaning of his instructions, and while putting into practice
+  the smaller details in which his work abounds,  they have
+  overlooked its essential purport.  That is the motive which
+  has led me to outline a rough explanation of the whole.
+
+     One thing to be noticed in the above is the explicit
+statement that the 13 chapters were specially composed for King
+Ho Lu.  This is supported by the internal evidence of I. ss. 15,
+in which it seems clear that some ruler is addressed.
+     In the bibliographic section of the HAN SHU, there is an
+entry which has given rise to much discussion:  "The works of Sun
+Tzu of Wu in 82 P`IEN (or chapters), with diagrams in 9 CHUAN."
+It is evident that this cannot be merely the 13 chapters known to
+Ssu-ma Ch`ien,  or those we possess today.  Chang Shou-chieh
+refers to an edition of Sun Tzu's ART OF WAR of which the "13
+chapters" formed the first CHUAN, adding that there were two
+other CHUAN besides.  This has brought forth a theory, that the
+bulk of these 82 chapters consisted of other writings of Sun Tzu
+--  we should call them apocryphal -- similar to the WEN TA, of
+which a specimen dealing with the Nine Situations [15] is
+preserved in the T`UNG TIEN, and another in Ho Shin's commentary.
+It is suggested that before his interview with Ho Lu, Sun Tzu had
+only written the 13 chapters, but afterwards composed a sort of
+exegesis in the form of question and answer between himself and
+the King.  Pi I-hsun, the author of the SUN TZU HSU LU, backs
+this up with a quotation from the WU YUEH CH`UN CH`IU:  "The King
+of Wu summoned Sun Tzu, and asked him questions about the art of
+war.  Each time he set forth a chapter of his work, the King
+could not find words enough to praise him."  As he points out, if
+the whole work was expounded on the same scale as in the above-
+mentioned fragments, the total number of chapters could not fail
+to be considerable.  Then the numerous other treatises attributed
+to Sun Tzu might be included.  The fact that the HAN CHIH
+mentions no work of Sun Tzu except the 82 P`IEN, whereas the Sui
+and T`ang bibliographies give the titles of others in addition to
+the "13 chapters," is good proof, Pi I-hsun thinks, that all of
+these were contained in the 82 P`IEN.  Without pinning our faith
+to the accuracy of details supplied by the WU YUEH CH`UN CH`IU,
+or admitting the genuineness of any of the treatises cited by Pi
+I-hsun,  we may see in this theory a probable solution of the
+mystery.  Between Ssu-ma Ch`ien and Pan Ku there was plenty of
+time for a luxuriant crop of forgeries to have grown up under the
+magic name of Sun Tzu, and the 82 P`IEN may very well represent a
+collected edition of these lumped together with the original
+work.  It is also possible, though less likely, that some of them
+existed in the time of the earlier historian and were purposely
+ignored by him. [16]
+     Tu Mu's conjecture seems to be based on a passage which
+states:  "Wei Wu Ti strung together Sun Wu's Art of War," which
+in turn may have resulted from a misunderstanding of the final
+words of Ts`ao King's preface.  This, as Sun Hsing-yen points
+out, is only a modest way of saying that he made an explanatory
+paraphrase, or in other words, wrote a commentary on it.  On the
+whole, this theory has met with very little acceptance.  Thus,
+the SSU K`U CH`UAN SHU says:  "The mention of the 13 chapters in
+the SHIH CHI shows that they were in existence before the HAN
+CHIH, and that latter accretions are not to be considered part of
+the original work.  Tu Mu's assertion can certainly not be taken
+as proof."
+     There is every reason to suppose, then, that the 13 chapters
+existed in the time of Ssu-ma Ch`ien practically as we have them
+now.  That the work was then well known he tells us in so many
+words.  "Sun Tzu's 13 Chapters and Wu Ch`i's Art of War are the
+two books that people commonly refer to on the subject of
+military matters.  Both of them are widely distributed, so I will
+not discuss them here."  But as we go further back, serious
+difficulties begin to arise.  The salient fact which has to be
+faced is that the TSO CHUAN, the greatest contemporary record,
+makes no mention whatsoever of Sun Wu, either as a general or as
+a writer.  It is natural, in view of this awkward circumstance,
+that many scholars should not only cast doubt on the story of Sun
+Wu as given in the SHIH CHI, but even show themselves frankly
+skeptical as to the existence of the man at all.  The most
+powerful presentment of this side of the case is to be found in
+the following disquisition by Yeh Shui-hsin: [17] --
+
+       It is stated in Ssu-ma Ch`ien's history that Sun Wu was
+  a native of the Ch`i State, and employed by Wu; and that in
+  the reign of Ho Lu he crushed Ch`u, entered Ying, and was a
+  great general.  But in Tso's Commentary no Sun Wu appears at
+  all.  It is true that Tso's Commentary need not contain
+  absolutely everything that other histories contain.  But Tso
+  has not omitted to mention vulgar plebeians and hireling
+  ruffians such as Ying K`ao-shu, [18] Ts`ao Kuei, [19] Chu
+  Chih-wu and Chuan She-chu [20].  In the case of Sun Wu, whose
+  fame and achievements were so brilliant, the omission is much
+  more glaring.  Again, details are given, in their due order,
+  about his contemporaries Wu Yuan and the Minister P`ei.  [21]
+  Is it credible that Sun Wu alone should have been passed
+  over?
+       In point of literary style, Sun Tzu's work belongs to
+  the same school as KUAN TZU, [22] LIU T`AO, [23] and the YUEH
+  YU [24] and may have been the production of some private
+  scholar living towards the end of the "Spring and Autumn" or
+  the beginning of the "Warring States" period. [25]  The story
+  that his precepts were actually applied by the Wu State, is
+  merely the outcome of big talk on the part of his followers.
+       From the flourishing period of the Chou dynasty [26]
+  down to the time of the "Spring and Autumn," all military
+  commanders were statesmen as well, and the class of
+  professional generals, for conducting external campaigns, did
+  not then exist.  It was not until the period of the "Six
+  States" [27] that this custom changed.  Now although Wu was
+  an uncivilized State, is it conceivable that Tso should have
+  left unrecorded the fact that Sun Wu was a great general and
+  yet held no civil office?  What we are told, therefore, about
+  Jang-chu [28] and Sun Wu, is not authentic matter,  but the
+  reckless fabrication of theorizing pundits.  The story of Ho
+  Lu's experiment on the women, in particular, is utterly
+  preposterous and incredible.
+
+     Yeh Shui-hsin represents Ssu-ma Ch`ien as having said that
+Sun Wu crushed Ch`u and entered Ying.  This is not quite correct.
+No doubt the impression left on the reader's mind is that he at
+least shared in these exploits.  The fact may or may not be
+significant; but it is nowhere explicitly stated in the SHIH CHI
+either that Sun Tzu was general on the occasion of the taking of
+Ying, or that he even went there at all.  Moreover, as we know
+that Wu Yuan and Po P`ei both took part in the expedition, and
+also that its success was largely due to the dash and enterprise
+of Fu Kai, Ho Lu's younger brother, it is not easy to see how yet
+another general could have played a very prominent part in the
+same campaign.
+     Ch`en Chen-sun of the Sung dynasty has the note: --
+
+       Military writers look upon Sun Wu as the father of their
+  art.  But the fact that he does not appear in the TSO CHUAN,
+  although he is said to have served under Ho Lu King of Wu,
+  makes it uncertain what period he really belonged to.
+
+He also says: --
+
+       The works of Sun Wu and Wu Ch`i may be of genuine
+  antiquity.
+
+     It is noticeable that both Yeh Shui-hsin and Ch`en Chen-sun,
+while rejecting the personality of Sun Wu as he figures in Ssu-ma
+Ch`ien's history, are inclined to accept the date traditionally
+assigned to the work which passes under his name.  The author of
+the HSU LU fails to appreciate this distinction, and consequently
+his bitter attack on Ch`en Chen-sun really misses its mark.  He
+makes one or two points, however, which certainly tell in favor
+of the high antiquity of our "13 chapters."  "Sun Tzu," he says,
+"must have lived in the age of Ching Wang [519-476], because he
+is frequently plagiarized in subsequent works of the Chou, Ch`in
+and Han dynasties."  The two most shameless offenders in this
+respect are Wu Ch`i and Huai-nan Tzu, both of them important
+historical personages in their day.  The former lived only a
+century after the alleged date of Sun Tzu, and his death is known
+to have taken place in 381 B.C.  It was to him, according to Liu
+Hsiang,  that Tseng Shen delivered the TSO CHUAN, which had been
+entrusted to him by its author.  [29]   Now the fact that
+quotations from the ART OF WAR, acknowledged or otherwise, are to
+be found in so many authors of different epochs, establishes a
+very strong presumption that it was anterior to them all, -- in
+other words, that Sun Tzu's treatise was already in existence
+towards the end of the 5th century B.C.  Further proof of Sun
+Tzu's antiquity is
+furnished by the archaic or wholly obsolete meanings attaching to
+a number of the words he uses.  A list of these, which might
+perhaps be extended, is given in the HSU LU; and though some of
+the interpretations are doubtful, the main argument is hardly
+affected thereby.  Again, it must not be forgotten that Yeh Shui-
+hsin, a scholar and critic of the first rank, deliberately
+pronounces the style of the 13 chapters to belong to the early
+part of the fifth century.  Seeing that he is actually engaged in
+an attempt to disprove the existence of Sun Wu himself, we may be
+sure that he would not have hesitated to assign the work to a
+later date had he not honestly believed the contrary.  And it is
+precisely on such a point that the judgment of an educated
+Chinaman will carry most weight.  Other internal evidence is not
+far to seek.  Thus in XIII. ss. 1, there is an unmistakable
+allusion to the ancient system of land-tenure which had already
+passed away by the time of Mencius, who was anxious to see it
+revived in a modified form. [30]  The only warfare Sun Tzu knows
+is that carried on between the various feudal princes, in which
+armored chariots play a large part.  Their use seems to have
+entirely died out before the end of the Chou dynasty.  He speaks
+as a man of Wu, a state which ceased to exist as early as 473
+B.C.  On this I shall touch presently.
+
+     But once refer the work to the 5th century or earlier,  and
+the chances of its being other than a bona fide production are
+sensibly diminished.  The great age of forgeries did not come
+until long after.  That it should have been forged in the period
+immediately following 473 is particularly unlikely, for no one,
+as a rule, hastens to identify himself with a lost cause.  As for
+Yeh Shui-hsin's theory, that the author was a literary recluse,
+that seems to me quite untenable.  If one thing is more apparent
+than another after reading the maxims of Sun Tzu, it is that
+their essence has been distilled from a large store of personal
+observation and experience.  They reflect the mind not only of a
+born strategist, gifted with a rare faculty of generalization,
+but also of a practical soldier closely acquainted with the
+military conditions of his time.  To say nothing of the fact that
+these sayings have been accepted and endorsed by all the greatest
+captains of Chinese history, they offer a combination of
+freshness and sincerity, acuteness and common sense, which quite
+excludes the idea that they were artificially concocted in the
+study.  If we admit, then, that the 13 chapters were the genuine
+production of a military man living towards the end of the "CH`UN
+CH`IU" period, are we not bound, in spite of the silence of the
+TSO CHUAN, to accept Ssu-ma Ch`ien's account in its entirety?  In
+view of his high repute as a sober historian,  must we not
+hesitate to assume that the records he drew upon for Sun Wu's
+biography were false and untrustworthy?  The answer, I fear, must
+be in the negative.  There is still one grave, if not fatal,
+objection to the chronology involved in the story as told in the
+SHIH CHI, which, so far as I am aware, nobody has yet pointed
+out.  There are two passages in Sun Tzu in which he alludes to
+contemporary affairs.  The first is in VI. ss. 21: --
+
+       Though according to my estimate the soldiers of Yueh
+  exceed our own in number, that shall advantage them nothing
+  in the matter of victory.  I say then that victory can be
+  achieved.
+
+The other is in XI. ss. 30: --
+
+       Asked if an army can be made to imitate the SHUAI-JAN, I
+  should answer, Yes.  For the men of Wu and the men of Yueh
+  are enemies;  yet if they are crossing a river in the same
+  boat and are caught by a storm, they will come to each
+  other's assistance just as the left hand helps the right.
+
+     These two paragraphs are extremely valuable as evidence of
+the date of composition.  They assign the work to the period of
+the struggle between Wu and Yueh.  So much has been observed by
+Pi I-hsun.  But what has hitherto escaped notice is that they
+also seriously impair the credibility of Ssu-ma Ch`ien's
+narrative.  As we have seen above, the first positive date given
+in connection with Sun Wu is 512 B.C.  He is then spoken of as a
+general,  acting as confidential adviser to Ho Lu, so that his
+alleged introduction to that monarch had already taken place, and
+of course the 13 chapters must have been written earlier still.
+But at that time, and for several years after, down to the
+capture of Ying in 506, Ch`u and not Yueh, was the great
+hereditary enemy of Wu.  The two states, Ch`u and Wu, had been
+constantly at war for over half a century, [31] whereas the first
+war between Wu and Yueh was waged only in 510, [32] and even then
+was no more than a short interlude sandwiched in the midst of the
+fierce struggle with Ch`u.  Now Ch`u is not mentioned in the 13
+chapters at all.  The natural inference is that they were written
+at a time when Yueh had become the prime antagonist of Wu, that
+is, after Ch`u had suffered the great humiliation of 506.  At
+this point, a table of dates may be found useful.
+
+B.C. |
+     |
+514  |  Accession of Ho Lu.
+512  |  Ho Lu attacks Ch`u, but is dissuaded from entering Ying,
+     |    the capital.  SHIH CHI mentions Sun Wu as general.
+511  |  Another attack on Ch`u.
+510  |  Wu makes a successful attack on Yueh.  This is the first
+     |    war between the two states.
+509  |
+ or  |  Ch`u invades Wu, but is signally defeated at Yu-chang.
+508  |
+506  |  Ho Lu attacks Ch`u with the aid of T`ang and Ts`ai.
+     |    Decisive battle of Po-chu, and capture of Ying.  Last
+     |    mention of Sun Wu in SHIH CHI.
+505  |  Yueh makes a raid on Wu in the absence of its army.  Wu
+     |    is beaten by Ch`in and evacuates Ying.
+504  |  Ho Lu sends Fu Ch`ai to attack Ch`u.
+497  |  Kou Chien becomes King of Yueh.
+496  |  Wu attacks Yueh, but is defeated by Kou Chien at Tsui-li.
+     |    Ho Lu is killed.
+494  |  Fu Ch`ai defeats Kou Chien in the great battle of Fu-
+     |    chiao, and enters the capital of Yueh.
+485  |
+ or  |  Kou Chien renders homage to Wu.  Death of Wu Tzu-hsu.
+484  |
+482  |  Kou Chien invades Wu in the absence of Fu Ch`ai.
+478  |
+ to  |  Further attacks by Yueh on Wu.
+476  |
+475  |  Kou Chien lays siege to the capital of Wu.
+473  |  Final defeat and extinction of Wu.
+
+     The sentence quoted above from VI. ss. 21 hardly strikes me
+as one that could have been written in the full flush of victory.
+It seems rather to imply that, for the moment at least, the tide
+had turned against Wu, and that she was getting the worst of the
+struggle.  Hence we may conclude that our treatise was not in
+existence in 505, before which date Yueh does not appear to have
+scored any notable success against Wu.  Ho Lu died in 496,  so
+that if the book was written for him, it must have been during
+the period 505-496, when there was a lull in the hostilities,  Wu
+having presumably been exhausted by its supreme effort against Ch`u.
+On the other hand, if we choose to disregard the tradition
+connecting Sun Wu's name with Ho Lu, it might equally well have
+seen the light between 496 and 494, or possibly in the period
+482-473, when Yueh was once again becoming a very serious menace.
+[33]  We may feel fairly certain that the author, whoever he may
+have been, was not a man of any great eminence in his own day.
+On this point the negative testimony of the TSO CHUAN far
+outweighs any shred of authority still attaching to the SHIH CHI,
+if once its other facts are discredited.  Sun Hsing-yen, however,
+makes a feeble attempt to explain the omission of his name from
+the great commentary.  It was Wu Tzu-hsu, he says, who got all
+the credit of Sun Wu's exploits, because the latter  (being an
+alien) was not rewarded with an office in the State.
+     How then did the Sun Tzu legend originate?  It may be that
+the growing celebrity of the book imparted by degrees a kind of
+factitious renown to its author.  It was felt to be only right
+and proper that one so well versed in the science of war should
+have solid achievements to his credit as well.  Now the capture
+of Ying was undoubtedly the greatest feat of arms in Ho Lu's
+reign;  it made a deep and lasting impression on all the
+surrounding states, and raised Wu to the short-lived zenith of
+her power.  Hence, what more natural, as time went on, than that
+the acknowledged master of strategy, Sun Wu, should be popularly
+identified with that campaign, at first perhaps only in the sense
+that his brain conceived and planned it; afterwards, that it was
+actually carried out by him in conjunction with Wu Yuan, [34]  Po
+P`ei and Fu Kai?
+     It is obvious that any attempt to reconstruct even the
+outline of Sun Tzu's life must be based almost wholly on
+conjecture.  With this necessary proviso, I should say that he
+probably entered the service of Wu about the time of Ho Lu's
+accession,  and gathered experience, though only in the capacity
+of a subordinate officer, during the intense military activity
+which marked the first half of the prince's reign. [35]   If he
+rose to be a general at all, he certainly was never on an equal
+footing with the three above mentioned.  He was doubtless present
+at the investment and occupation of Ying,  and witnessed Wu's
+sudden collapse in the following year.  Yueh's attack at this
+critical juncture, when her rival was embarrassed on every side,
+seems to have convinced him that this upstart kingdom was the
+great enemy against whom every effort would henceforth have to be
+directed.  Sun Wu was thus a well-seasoned warrior when he sat
+down to write his famous book, which according to my reckoning
+must have appeared towards the end, rather than the beginning of
+Ho Lu's reign.  The story of the women may possibly have grown
+out of some real incident occurring about the same time.  As we
+hear no more of Sun Wu after this from any source, he is hardly
+likely to have survived his patron or to have taken part in the
+death-struggle with Yueh, which began with the disaster at Tsui-
+li.
+     If these inferences are approximately correct, there is a
+certain irony in the fate which decreed that China's most
+illustrious man of peace should be contemporary with her greatest
+writer on war.
+
+
+The Text of Sun Tzu
+-------------------
+
+
+     I have found it difficult to glean much about the history of
+Sun Tzu's text.  The quotations that occur in early authors go to
+show that the "13 chapters" of which Ssu-ma Ch`ien speaks were
+essentially the same as those now extant.  We have his word for
+it that they were widely circulated in his day,  and can only
+regret that he refrained from discussing them on that account.
+Sun Hsing-yen says in his preface: --
+
+       During the Ch`in and Han dynasties Sun Tzu's ART OF WAR
+  was in general use amongst military commanders, but they seem
+  to have treated it as a work of mysterious import, and were
+  unwilling to expound it for the benefit of posterity.  Thus
+  it came about that Wei Wu was the first to write a commentary
+  on it.
+
+     As we have already seen, there is no reasonable ground to
+suppose that Ts`ao Kung tampered with the text.  But the text
+itself is often so obscure, and the number of editions which
+appeared from that time onward so great, especially during the
+T`ang and Sung dynasties, that it would be surprising if numerous
+corruptions had not managed to creep in.  Towards the middle of
+the Sung period, by which time all the chief commentaries on Sun
+Tzu were in existence, a certain Chi T`ien-pao published a work
+in 15 CHUAN entitled "Sun Tzu with the collected commentaries of
+ten writers."  There was another text, with variant readings put
+forward by Chu Fu of Ta-hsing, which also had supporters among
+the scholars of that period; but in the Ming editions, Sun Hsing-
+yen tells us, these readings were for some reason or other no
+longer put into circulation.  Thus, until the end of the 18th
+century, the text in sole possession of the field was one derived
+from Chi T`ien-pao's edition, although no actual copy of that
+important work was known to have survived.  That, therefore,  is
+the text of Sun Tzu which appears in the War section of the great
+Imperial encyclopedia printed in 1726, the KU CHIN T`U SHU CHI
+CH`ENG.  Another copy at my disposal of what is practically the
+same text,  with slight variations, is that contained in the
+"Eleven philosophers of the Chou and Ch`in dynasties"  [1758].
+And the Chinese printed in Capt. Calthrop's first edition is
+evidently a similar version which has filtered through Japanese
+channels.  So things remained until Sun Hsing-yen [1752-1818],  a
+distinguished antiquarian and classical scholar, who claimed to
+be an actual descendant of Sun Wu, [36] accidentally discovered a
+copy of Chi T`ien-pao's long-lost work, when on a visit to the
+library of the Hua-yin temple. [37]  Appended to it was the I
+SHUO of Cheng Yu-hsien, mentioned in the T`UNG CHIH,  and also
+believed to have perished.  This is what Sun Hsing-yen designates
+as the "original edition (or text)" -- a rather misleading name,
+for it cannot by any means claim to set before us the text of Sun
+Tzu in its pristine purity.  Chi T`ien-pao was a careless
+compiler,  and appears to have been content to reproduce the
+somewhat debased version current in his day, without troubling to
+collate it with the earliest editions then available.
+Fortunately,  two versions of Sun Tzu, even older than the newly
+discovered work, were still extant, one buried in the T`UNG TIEN,
+Tu Yu's great treatise on the Constitution, the other similarly
+enshrined in the T`AI P`ING YU LAN encyclopedia.  In both the
+complete text is to be found, though split up into fragments,
+intermixed with other matter, and scattered piecemeal over a
+number of different sections.  Considering that the YU LAN takes
+us back to the year 983, and the T`UNG TIEN about 200 years
+further still, to the middle of the T`ang dynasty, the value of
+these early transcripts of Sun Tzu can hardly be overestimated.
+Yet the idea of utilizing them does not seem to have occurred to
+anyone until Sun Hsing-yen, acting under Government instructions,
+undertook a thorough recension of the text.  This is his own
+account: --
+
+       Because of the numerous mistakes in the text of Sun Tzu
+  which his editors had handed down, the Government ordered
+  that the ancient edition [of Chi T`ien-pao] should be used,
+  and that the text should be revised and corrected throughout.
+  It happened that Wu Nien-hu, the Governor Pi Kua, and Hsi,  a
+  graduate of the second degree, had all devoted themselves to
+  this study, probably surpassing me therein.  Accordingly,  I
+  have had the whole work cut on blocks as a textbook for
+  military men.
+
+     The three individuals here referred to had evidently been
+occupied on the text of Sun Tzu prior to Sun Hsing-yen's
+commission,  but we are left in doubt as to the work they really
+accomplished.  At any rate, the new edition,  when ultimately
+produced, appeared in the names of Sun Hsing-yen and only one co-
+editor, Wu Jen-shi.  They took the "original edition" as their
+basis, and by careful comparison with older versions, as well as
+the extant commentaries and other sources of information such as
+the I SHUO,  succeeded in restoring a very large number of
+doubtful passages,  and turned out, on the whole, what must be
+accepted as the closest approximation we are ever likely to get to
+Sun Tzu's original work.  This is what will hereafter be
+denominated the "standard text."
+     The copy which I have used belongs to a reissue dated 1877.
+It is in 6 PEN, forming part of a well-printed set of 23 early
+philosophical works in 83 PEN. [38]  It opens with a preface by
+Sun Hsing-yen (largely quoted in this introduction),  vindicating
+the traditional view of Sun Tzu's life and performances,  and
+summing up in remarkably concise fashion the evidence in its
+favor.  This is followed by Ts`ao Kung's preface to his edition,
+and the biography of Sun Tzu from the SHIH CHI, both translated
+above.  Then come, firstly, Cheng Yu-hsien's I SHUO,  [39]  with
+author's preface, and next, a short miscellany of historical and
+bibliographical information entitled SUN TZU HSU LU, compiled by
+Pi I-hsun.  As regards the body of the work,  each separate
+sentence is followed by a note on the text, if required, and then
+by the various commentaries appertaining to it,  arranged in
+chronological order.  These we shall now proceed to discuss
+briefly, one by one.
+
+
+The Commentators
+----------------
+
+
+     Sun Tzu can boast an exceptionally long and distinguished
+roll of commentators, which would do honor to any classic.  Ou-yang
+Hsiu remarks on this fact, though he wrote before the tale was
+complete,  and rather ingeniously explains it by saying that the
+artifices of war, being inexhaustible, must therefore be
+susceptible of treatment in a great variety of ways.
+
+     1.  TS`AO TS`AO or Ts`ao Kung, afterwards known as Wei Wu Ti
+[A.D.  155-220].  There is hardly any room for doubt that the
+earliest commentary on Sun Tzu actually came from the pen of this
+extraordinary man, whose biography in the SAN KUO CHIH reads like
+a romance.  One of the greatest military geniuses that the world
+has seen, and Napoleonic in the scale of his operations, he was
+especially famed for the marvelous rapidity of his marches, which
+has found expression in the line "Talk of Ts`ao Ts`ao, and Ts`ao
+Ts`ao will appear."  Ou-yang Hsiu says of him that he was a great
+captain who "measured his strength against Tung Cho, Lu Pu and
+the two Yuan, father and son, and vanquished them all;  whereupon
+he divided the Empire of Han with Wu and Shu, and made himself
+king.  It is recorded that whenever a council of war was held by
+Wei on the eve of a far-reaching campaign,  he had all his
+calculations ready; those generals who made use of them did not
+lose one battle in ten; those who ran counter to them in any
+particular saw their armies incontinently beaten and put to
+flight."   Ts`ao Kung's notes on Sun Tzu,  models of austere
+brevity, are so thoroughly characteristic of the stern commander
+known to history, that it is hard indeed to conceive of them as
+the work of a mere LITTERATEUR.  Sometimes,  indeed,  owing to
+extreme compression, they are scarcely intelligible and stand no
+less in need of a commentary than the text itself. [40]
+
+     2.  MENG SHIH.  The commentary which has come down to us
+under this name is comparatively meager, and nothing about the
+author is known.  Even his personal name has not been recorded.
+Chi T`ien-pao's edition places him after Chia Lin, and Ch`ao Kung-
+wu also assigns him to the T`ang dynasty, [41] but this is a
+mistake.  In Sun Hsing-yen's preface, he appears as Meng Shih of
+the Liang dynasty [502-557].  Others would identify him with Meng
+K`ang of the 3rd century.  He is named in one work as the last of
+the "Five Commentators," the others being Wei Wu Ti, Tu Mu, Ch`en
+Hao and Chia Lin.
+
+     3.  LI CH`UAN of the 8th century was a well-known writer on
+military tactics.  One of his works has been in constant use down
+to the present day.  The T`UNG CHIH mentions "Lives of famous
+generals from the Chou to the T`ang dynasty" as written by him.
+[42]  According to Ch`ao Kung-wu and the T`IEN-I-KO catalogue, he
+followed a variant of the text of Sun Tzu which differs
+considerably from those now extant.  His notes are mostly short
+and to the point, and he frequently illustrates his remarks by
+anecdotes from Chinese history.
+
+     4.  TU YU (died 812) did not publish a separate commentary
+on Sun Tzu,  his notes being taken from the T`UNG TIEN,  the
+encyclopedic treatise on the Constitution which was his life-
+work.  They are largely repetitions of Ts`ao Kung and Meng Shih,
+besides which it is believed that he drew on the ancient
+commentaries of Wang Ling and others.  Owing to the peculiar
+arrangement of T`UNG TIEN, he has to explain each passage on its
+merits, apart from the context, and sometimes his own explanation
+does not agree with that of Ts`ao Kung, whom he always quotes
+first.  Though not strictly to be reckoned as one of the  "Ten
+Commentators,"  he was added to their number by Chi T`ien-pao,
+being wrongly placed after his grandson Tu Mu.
+
+     5.  TU MU (803-852) is perhaps best known as a poet -- a
+bright star even in the glorious galaxy of the T`ang period.  We
+learn from Ch`ao Kung-wu that although he had no practical
+experience of war,  he was extremely fond of discussing the
+subject,  and was moreover well read in the military history of
+the CH`UN CH`IU and CHAN KUO eras.  His notes,  therefore,  are
+well worth attention.  They are very copious, and replete with
+historical parallels.  The gist of Sun Tzu's work is thus
+summarized by him:  "Practice benevolence and justice, but on the
+other hand make full use of artifice and measures of expediency."
+He further declared that all the military triumphs and disasters
+of the thousand years which had elapsed since Sun Tzu's death
+would,  upon examination, be found to uphold and corroborate,  in
+every particular,  the maxims contained in his book.  Tu Mu's
+somewhat spiteful charge against Ts`ao Kung has already been
+considered elsewhere.
+
+     6.  CH`EN HAO appears to have been a contemporary of Tu Mu.
+Ch`ao Kung-wu says that he was impelled to write a new commentary
+on Sun Tzu because Ts`ao Kung's on the one hand was too obscure
+and subtle, and that of Tu Mu on the other too long-winded and
+diffuse.  Ou-yang Hsiu,  writing in the middle of the 11th
+century,  calls Ts`ao Kung, Tu Mu and Ch`en Hao the three chief
+commentators on Sun Tzu, and observes that Ch`en Hao is
+continually attacking Tu Mu's shortcomings.  His commentary,
+though not lacking in merit, must rank below those of his
+predecessors.
+
+     7.  CHIA LIN is known to have lived under the T`ang dynasty,
+for his commentary on Sun Tzu is mentioned in the T`ang Shu and
+was afterwards republished by Chi Hsieh of the same dynasty
+together with those of Meng Shih and Tu Yu.  It is of somewhat
+scanty texture, and in point of quality, too, perhaps the least
+valuable of the eleven.
+
+     8.  MEI YAO-CH`EN (1002-1060), commonly known by his "style"
+as Mei Sheng-yu, was, like Tu Mu, a poet of distinction.  His
+commentary was published with a laudatory preface by the great
+Ou-yang Hsiu, from which we may cull the following: --
+
+       Later scholars have misread Sun Tzu,  distorting his
+  words and trying to make them square with their own one-sided
+  views.  Thus, though commentators have not been lacking, only
+  a few have proved equal to the task.  My friend Sheng-yu has
+  not fallen into this mistake.  In attempting to provide a
+  critical commentary for Sun Tzu's work, he does not lose
+  sight of the fact that these sayings were intended for states
+  engaged in internecine warfare; that the author is not
+  concerned with the military conditions prevailing under the
+  sovereigns of the three ancient dynasties, [43] nor with the
+  nine punitive measures prescribed to the Minister of War.
+  [44]  Again, Sun Wu loved brevity of diction, but his meaning
+  is always deep.  Whether the subject be marching an army,  or
+  handling soldiers, or estimating the enemy,  or controlling
+  the forces of victory, it is always systematically treated;
+  the sayings are bound together in strict logical sequence,
+  though this has been obscured by commentators who have
+  probably failed to grasp their meaning.  In his own
+  commentary, Mei Sheng-yu has brushed aside all the obstinate
+  prejudices of these critics, and has tried to bring out the
+  true meaning of Sun Tzu himself.  In this way, the clouds of
+  confusion have been dispersed and the sayings made clear.  I
+  am convinced that the present work deserves to be handed down
+  side by side with the three great commentaries; and for a
+  great deal that they find in the sayings, coming generations
+  will have constant reason to thank my friend Sheng-yu.
+
+     Making some allowance for the exuberance of friendship, I am
+inclined to endorse this favorable judgment, and would certainly
+place him above Ch`en Hao in order of merit.
+
+     9.  WANG HSI,  also of the Sung dynasty,  is decidedly
+original in some of his interpretations, but much less judicious
+than Mei Yao-ch`en,  and on the whole not a very trustworthy
+guide.  He is fond of comparing his own commentary with that of
+Ts`ao Kung, but the comparison is not often flattering to him.
+We learn from Ch`ao Kung-wu that Wang Hsi revised the ancient
+text of Sun Tzu, filling up lacunae and correcting mistakes. [45]
+
+     10.  HO YEN-HSI of the Sung dynasty.  The personal name of
+this commentator is given as above by Cheng Ch`iao in the T`UNG
+CHIH,  written about the middle of the twelfth century,  but he
+appears simply as Ho Shih in the YU HAI, and Ma Tuan-lin quotes
+Ch`ao Kung-wu as saying that his personal name is unknown.  There
+seems to be no reason to doubt Cheng Ch`iao's statement,
+otherwise I should have been inclined to hazard a guess and
+identify him with one Ho Ch`u-fei, the author of a short treatise
+on war,  who lived in the latter part of the 11th century.  Ho
+Shih's commentary,  in the words of the T`IEN-I-KO catalogue,
+"contains helpful additions"  here and there,  but is chiefly
+remarkable for the copious extracts taken, in adapted form,  from
+the dynastic histories and other sources.
+
+     11.  CHANG YU.  The list closes with a commentator of no
+great originality perhaps, but gifted with admirable powers of
+lucid exposition.  His commentary is based on that of Ts`ao
+Kung, whose terse sentences he contrives to expand and develop in
+masterly fashion.  Without Chang Yu, it is safe to say that much
+of Ts`ao Kung's commentary would have remained cloaked in its
+pristine obscurity and therefore valueless.  His work is not
+mentioned in the Sung history, the T`UNG K`AO, or the YU HAI, but
+it finds a niche in the T`UNG CHIH, which also names him as the
+author of the "Lives of Famous Generals." [46]
+     It is rather remarkable that the last-named four should all
+have flourished within so short a space of time.  Ch`ao Kung-wu
+accounts for it by saying:  "During the early years of the Sung
+dynasty the Empire enjoyed a long spell of peace, and men ceased
+to practice the art of war.  But when [Chao] Yuan-hao's rebellion
+came [1038-42] and the frontier generals were defeated time after
+time,  the Court made strenuous inquiry for men skilled in war,
+and military topics became the vogue amongst all the high
+officials.  Hence it is that the commentators of Sun Tzu in our
+dynasty belong mainly to that period." [47]
+
+     Besides these eleven commentators, there are several others
+whose work has not come down to us.  The SUI SHU mentions four,
+namely Wang Ling (often quoted by Tu Yu as Wang Tzu); Chang Tzu-
+shang;  Chia Hsu of Wei; [48] and Shen Yu of Wu.  The T`ANG SHU
+adds Sun Hao, and the T`UNG CHIH Hsiao Chi, while the T`U SHU
+mentions a Ming commentator, Huang Jun-yu.  It is possible that
+some of these may have been merely collectors and editors of
+other commentaries, like Chi T`ien-pao and Chi Hsieh,  mentioned
+above.
+
+
+Appreciations of Sun Tzu
+------------------------
+
+
+     Sun Tzu has exercised a potent fascination over the minds of
+some of China's greatest men.  Among the famous generals who are
+known to have studied his pages with enthusiasm may be mentioned
+Han Hsin (d. 196 B.C.), [49] Feng I (d. 34 A.D.), [50]  Lu Meng
+(d. 219), [51] and Yo Fei (1103-1141). [52]  The opinion of Ts`ao
+Kung,  who disputes with Han Hsin the highest place in Chinese
+military annals,  has already been recorded.  [53]   Still more
+remarkable, in one way, is the testimony of purely literary men,
+such as Su Hsun (the father of Su Tung-p`o), who wrote several
+essays on military topics, all of which owe their chief
+inspiration to Sun Tzu.  The following short passage by him is
+preserved in the YU HAI: [54] --
+
+       Sun Wu's saying, that in war one cannot make certain of
+  conquering,  [55]  is very different indeed from what other
+  books tell us. [56]  Wu Ch`i was a man of the same stamp as
+  Sun Wu:  they both wrote books on war, and they are linked
+  together in popular speech as "Sun and Wu."  But Wu Ch`i's
+  remarks on war are less weighty, his rules are rougher and
+  more crudely stated, and there is not the same unity of plan
+  as in Sun Tzu's work, where the style is terse,  but the
+  meaning fully brought out.
+
+     The following is an extract from the "Impartial Judgments in
+the Garden of Literature" by Cheng Hou: --
+
+       Sun Tzu's 13 chapters are not only the staple and base
+  of all military men's training, but also compel the most
+  careful attention of scholars and men of letters.  His
+  sayings are terse yet elegant, simple yet profound,
+  perspicuous and eminently practical.  Such works as the LUN
+  YU, the I CHING and the great Commentary, [57] as well as the
+  writings of Mencius, Hsun K`uang and Yang Chu, all fall below
+  the level of Sun Tzu.
+
+     Chu Hsi, commenting on this, fully admits the first part of
+the criticism, although he dislikes the audacious comparison with
+the venerated classical works.  Language of this sort, he says,
+"encourages a ruler's bent towards unrelenting warfare and
+reckless militarism."
+
+
+Apologies for War
+-----------------
+
+
+     Accustomed as we are to think of China as the greatest
+peace-loving nation on earth, we are in some danger of forgetting
+that her experience of war in all its phases has also been such
+as no modern State can parallel.  Her long military annals
+stretch back to a point at which they are lost in the mists of
+time.  She had built the Great Wall and was maintaining a huge
+standing army along her frontier centuries before the first Roman
+legionary was seen on the Danube.  What with the perpetual
+collisions of the ancient feudal States, the grim conflicts with
+Huns,  Turks and other invaders after the centralization of
+government, the terrific upheavals which accompanied the
+overthrow of so many dynasties, besides the countless rebellions
+and minor disturbances that have flamed up and flickered out
+again one by one, it is hardly too much to say that the clash of
+arms has never ceased to resound in one portion or another of the
+Empire.
+     No less remarkable is the succession of illustrious captains
+to whom China can point with pride.  As in all countries,  the
+greatest are fond of emerging at the most fateful crises of her
+history.  Thus, Po Ch`i stands out conspicuous in the period when
+Ch`in was entering upon her final struggle with the remaining
+independent states.  The stormy years which followed the break-up
+of the Ch`in dynasty are illuminated by the transcendent genius
+of Han Hsin.  When the House of Han in turn is tottering to its
+fall,  the great and baleful figure of Ts`ao Ts`ao dominates the
+scene.  And in the establishment of the T`ang dynasty, one of the
+mightiest tasks achieved by man, the superhuman energy of Li
+Shih-min (afterwards the Emperor T`ai Tsung) was seconded by the
+brilliant strategy of Li Ching.  None of these generals need fear
+comparison with the greatest names in the military history of
+Europe.
+     In spite of all this, the great body of Chinese sentiment,
+from Lao Tzu downwards, and especially as reflected in the
+standard literature of Confucianism, has been consistently
+pacific and intensely opposed to militarism in any form.  It is
+such an uncommon thing to find any of the literati defending
+warfare on principle,  that I have thought it worth while to
+collect and translate a few passages in which the unorthodox view
+is upheld.  The following, by Ssu-ma Ch`ien, shows that for all
+his ardent admiration of Confucius, he was yet no advocate of
+peace at any price: --
+
+       Military weapons are the means used by the Sage to
+  punish violence and cruelty, to give peace to troublous
+  times,  to remove difficulties and dangers,  and to succor
+  those who are in peril.  Every animal with blood in its veins
+  and horns on its head will fight when it is attacked.  How
+  much more so will man, who carries in his breast the
+  faculties of love and hatred, joy and anger!   When he is
+  pleased,  a feeling of affection springs up within him;  when
+  angry, his poisoned sting is brought into play.  That is the
+  natural law which governs his being....  What then shall be
+  said of those scholars of our time,  blind to all great
+  issues, and without any appreciation of relative values,  who
+  can only bark out their stale formulas about  "virtue"  and
+  "civilization," condemning the use of military weapons?  They
+  will surely bring our country to impotence and dishonor and
+  the loss of her rightful heritage; or, at the very least,
+  they will bring about invasion and rebellion,  sacrifice of
+  territory and general enfeeblement.  Yet they obstinately
+  refuse to modify the position they have taken up.  The truth
+  is that, just as in the family the teacher must not spare the
+  rod,  and punishments cannot be dispensed with in the State,
+  so military chastisement can never be allowed to fall into
+  abeyance in the Empire.  All one can say is that this power
+  will be exercised wisely by some, foolishly by others,  and
+  that among those who bear arms some will be loyal and others
+  rebellious. [58]
+
+     The next piece is taken from Tu Mu's preface to his
+commentary on Sun Tzu: --
+
+       War may be defined as punishment, which is one of the
+  functions of government.  It was the profession of Chung Yu
+  and Jan Ch`iu, both disciples of Confucius.  Nowadays,  the
+  holding of trials and hearing of litigation, the imprisonment
+  of offenders and their execution by flogging in the market-
+  place,  are all done by officials.  But the wielding of huge
+  armies, the throwing down of fortified cities, the hauling of
+  women and children into captivity, and the beheading of
+  traitors  --  this is also work which is done by officials.
+  The objects of the rack and of military weapons are
+  essentially the same.  There is no intrinsic difference
+  between the punishment of flogging and cutting off heads in
+  war.  For the lesser infractions of law, which are easily
+  dealt with, only a small amount of force need be employed:
+  hence the use of military weapons and wholesale decapitation.
+  In both cases, however, the end in view is to get rid of
+  wicked people, and to give comfort and relief to the good....
+       Chi-sun asked Jan Yu, saying:  "Have you, Sir,  acquired
+  your military aptitude by study, or is it innate?"   Jan Yu
+  replied:   "It has been acquired by study." [59]   "How can
+  that be so," said Chi-sun, "seeing that you are a disciple of
+  Confucius?"  "It is a fact," replied Jan Yu; "I was taught by
+  Confucius.  It is fitting that the great Sage should exercise
+  both civil and military functions, though to be sure my
+  instruction in the art of fighting has not yet gone very
+  far."
+       Now,  who the author was of this rigid distinction
+  between the "civil" and the "military," and the limitation of
+  each to a separate sphere of action, or in what year of which
+  dynasty it was first introduced, is more than I can say.
+  But,  at any rate, it has come about that the members of the
+  governing class are quite afraid of enlarging on military
+  topics,  or do so only in a shamefaced manner.  If any are
+  bold enough to discuss the subject, they are at once set down
+  as eccentric individuals of coarse and brutal propensities.
+  This is an extraordinary instance in which,  through sheer
+  lack of reasoning, men unhappily lose sight of fundamental
+  principles.
+       When the Duke of Chou was minister under Ch`eng Wang, he
+  regulated ceremonies and made music, and venerated the arts
+  of scholarship and learning; yet when the barbarians of the
+  River Huai revolted, [60] he sallied forth and chastised
+  them.  When Confucius held office under the Duke of Lu, and a
+  meeting was convened at Chia-ku, [61] he said:  "If pacific
+  negotiations are in progress, warlike preparations should
+  have been made beforehand."  He rebuked and shamed the
+  Marquis of Ch`i, who cowered under him and dared not proceed
+  to violence.  How can it be said that these two great Sages
+  had no knowledge of military matters?
+
+     We have seen that the great Chu Hsi held Sun Tzu in high
+esteem.  He also appeals to the authority of the Classics: --
+
+       Our Master Confucius, answering Duke Ling of Wei,  said:
+  "I have never studied matters connected with armies and
+  battalions."  [62]   Replying to K`ung Wen-tzu, he said:  "I
+  have not been instructed about buff-coats and weapons."   But
+  if we turn to the meeting at Chia-ku, we find that he used
+  armed force against the men of Lai, so that the marquis of
+  Ch`i was overawed.  Again,  when the inhabitants of Pi
+  revolted, he ordered his officers to attack them, whereupon
+  they were defeated and fled in confusion.  He once uttered
+  the words:  "If I fight, I conquer." [63]  And Jan Yu also
+  said:  "The Sage exercises both civil and military
+  functions."  [64]   Can it be a fact that Confucius never
+  studied or received instruction in the art of war?   We can
+  only say that he did not specially choose matters connected
+  with armies and fighting to be the subject of his teaching.
+
+     Sun Hsing-yen,  the editor of Sun Tzu,  writes in similar
+strain: --
+
+       Confucius said:  "I am unversed in military matters."
+  [65]  He also said:  "If I fight,  I conquer."   Confucius
+  ordered ceremonies and regulated music.  Now war constitutes
+  one of the five classes of State ceremonial, [66]  and must
+  not be treated as an independent branch of study.  Hence, the
+  words "I am unversed in" must be taken to mean that there are
+  things which even an inspired Teacher does not know.  Those
+  who have to lead an army and devise stratagems,  must learn
+  the art of war.  But if one can command the services of a
+  good general like Sun Tzu, who was employed by Wu Tzu-hsu,
+  there is no need to learn it oneself.  Hence the remark added
+  by Confucius:  "If I fight, I conquer."
+       The men of the present day, however, willfully interpret
+  these words of Confucius in their narrowest sense, as though
+  he meant that books on the art of war were not worth reading.
+  With blind persistency, they adduce the example of Chao Kua,
+  who pored over his father's books to no purpose, [67]  as a
+  proof that all military theory is useless.  Again,  seeing
+  that books on war have to do with such things as opportunism
+  in designing plans, and the conversion of spies,  they hold
+  that the art is immoral and unworthy of a sage.  These people
+  ignore the fact that the studies of our scholars and the
+  civil administration of our officials also require steady
+  application and practice before efficiency is reached.  The
+  ancients were particularly chary of allowing mere novices to
+  botch their work. [68]  Weapons are baneful [69] and fighting
+  perilous;  and unless a general is in constant
+  practice, he ought not to hazard other men's lives in battle.
+  [70]  Hence it is essential that Sun Tzu's 13 chapters should
+  be studied.
+       Hsiang Liang used to instruct his nephew Chi [71] in the
+  art of war.  Chi got a rough idea of the art in its general
+  bearings,  but would not pursue his studies to their proper
+  outcome,  the consequence being that he was finally defeated
+  and overthrown.  He did not realize that the tricks and
+  artifices of war are beyond verbal computation.  Duke Hsiang
+  of Sung and King Yen of Hsu were brought to destruction by
+  their misplaced humanity.  The treacherous and underhand
+  nature of war necessitates the use of guile and stratagem
+  suited to the occasion.  There is a case on record of
+  Confucius himself having violated an extorted oath, [72]  and
+  also of his having left the Sung State in disguise. [73]  Can
+  we then recklessly arraign Sun Tzu for disregarding truth and
+  honesty?
+
+
+Bibliography
+------------
+
+
+     The following are the oldest Chinese treatises on war, after
+Sun Tzu.  The notes on each have been drawn principally from the
+SSU K`U CH`UAN SHU CHIEN MING MU LU, ch. 9, fol. 22 sqq.
+
+     1.  WU TZU, in 1 CHUAN or 6 chapters.  By Wu Ch`i  (d.  381
+B.C.).  A genuine work.  See SHIH CHI, ch. 65.
+
+     2.  SSU-MA FA, in 1 CHUAN or 5 chapters.  Wrongly attributed
+to Ssu-ma Jang-chu of the 6th century B.C.  Its date,  however,
+must be early, as the customs of the three ancient dynasties are
+constantly to be met with in its pages.  See SHIH CHI, ch. 64.
+     The SSU K`U CH`UAN SHU (ch. 99, f. 1)  remarks that the
+oldest three treatises on war, SUN TZU, WU TZU and SSU-MA FA,
+are,  generally speaking, only concerned with things strictly
+military  --  the art of producing,  collecting,  training and
+drilling troops, and the correct theory with regard to measures
+of expediency, laying plans, transport of goods and the handling
+of soldiers -- in strong contrast to later works, in which the
+science of war is usually blended with metaphysics,  divination
+and magical arts in general.
+
+     3.  LIU T`AO, in 6 CHUAN, or 60 chapters.  Attributed to Lu
+Wang  (or Lu Shang, also known as T`ai Kung) of the 12th century
+B.C. [74]  But its style does not belong to the era of the Three
+Dynasties.  Lu Te-ming (550-625 A.D.) mentions the work,  and
+enumerates the headings of the six sections so that the forgery
+cannot have been later than the Sui dynasty.
+
+     4.  WEI LIAO TZU, in 5 CHUAN.  Attributed to Wei Liao  (4th
+cent. B.C.), who studied under the famous Kuei-ku Tzu.  The work
+appears to have been originally in 31 chapters, whereas the text
+we possess contains only 24.  Its matter is sound enough in the
+main,  though the strategical devices differ considerably from
+those of the Warring States period.  It has been furnished with a
+commentary by the well-known Sung philosopher Chang Tsai.
+
+     5.  SAN LUEH, in 3 CHUAN.  Attributed to Huang-shih Kung,  a
+legendary personage who is said to have bestowed it on Chang
+Liang (d. 187 B.C.) in an interview on a bridge.  But here again,
+the style is not that of works dating from the Ch`in or Han
+period.  The Han Emperor Kuang Wu [25-57 A.D.] apparently quotes
+from it in one of his proclamations; but the passage in question
+may have been inserted later on, in order to prove the
+genuineness of the work.  We shall not be far out if we refer it
+to the Northern Sung period [420-478 A.D.], or somewhat earlier.
+
+     6.  LI WEI KUNG WEN TUI, in 3 sections.  Written in the form
+of a dialogue between T`ai Tsung and his great general Li Ching,
+it is usually ascribed to the latter.  Competent authorities
+consider it a forgery, though the author was evidently well
+versed in the art of war.
+
+     7.  LI CHING PING FA (not to be confounded with the
+foregoing)  is a short treatise in 8 chapters, preserved in the
+T`UNG TIEN, but not published separately.  This fact explains its
+omission from the SSU K`U CH`UAN SHU.
+
+     8.  WU CH`I CHING, in 1 CHUAN.  Attributed to the legendary
+minister Feng Hou, with exegetical notes by Kung-sun Hung of the
+Han dynasty (d. 121 B.C.), and said to have been eulogized by the
+celebrated general Ma Lung (d. 300 A.D.).  Yet the earliest
+mention of it is in the SUNG CHIH.  Although a forgery, the work
+is well put together.
+
+     Considering the high popular estimation in which Chu-ko
+Liang has always been held, it is not surprising to find more
+than one work on war ascribed to his pen.  Such are (1) the SHIH
+LIU TS`E (1 CHUAN), preserved in the YUNG LO TA TIEN; (2) CHIANG
+YUAN (1 CHUAN); and (3) HSIN SHU (1 CHUAN), which steals
+wholesale from Sun Tzu.  None of these has the slightest claim to
+be considered genuine.
+     Most of the large Chinese encyclopedias contain extensive
+sections devoted to the literature of war.  The following
+references may be found useful: --
+
+     T`UNG TIEN (circa 800 A.D.), ch. 148-162.
+     T`AI P`ING YU LAN (983), ch. 270-359.
+     WEN HSIEN TUNG K`AO (13th cent.), ch. 221.
+     YU HAI (13th cent.), ch. 140, 141.
+     SAN TS`AI T`U HUI (16th cent.).
+     KUANG PO WU CHIH (1607), ch. 31, 32.
+     CH`IEN CH`IO LEI SHU (1632), ch. 75.
+     YUAN CHIEN LEI HAN (1710), ch. 206-229.
+     KU CHIN T`U SHU CHI CH`ENG (1726), section XXX, esp. ch. 81-
+      90.
+     HSU WEN HSIEN T`UNG K`AO (1784), ch. 121-134.
+     HUANG CH`AO CHING SHIH WEN PIEN (1826), ch. 76, 77.
+
+     The bibliographical sections of certain historical works
+also deserve mention: --
+
+     CH`IEN HAN SHU, ch. 30.
+     SUI SHU, ch. 32-35.
+     CHIU T`ANG SHU, ch. 46, 47.
+     HSIN T`ANG SHU, ch. 57, 60.
+     SUNG SHIH, ch. 202-209.
+     T`UNG CHIH (circa 1150), ch. 68.
+
+     To these of course must be added the great Catalogue of the
+Imperial Library: --
+
+     SSU K`U CH`UAN SHU TSUNG MU T`I YAO (1790), ch. 99, 100.
+
+
+Footnotes
+---------
+
+
+1.  SHIH CHI, ch. 65.
+
+2.  He reigned from 514 to 496 B.C.
+
+3.  SHIH CHI, ch. 130.
+
+4.  The appellation of Nang Wa.
+
+5.  SHIH CHI, ch. 31.
+
+6.  SHIH CHI, ch. 25.
+
+7.  The appellation of Hu Yen, mentioned in ch. 39 under the year
+637.
+
+8.  Wang-tzu Ch`eng-fu, ch. 32, year 607.
+
+9.  The mistake is natural enough.  Native critics refer to a
+work of the Han dynasty, which says:  "Ten LI outside the WU gate
+[of the city of Wu, now Soochow in Kiangsu] there is a great
+mound, raised to commemorate the entertainment of Sun Wu of Ch`i,
+who excelled in the art of war, by the King of Wu."
+
+10.  "They attached strings to wood to make bows, and sharpened
+wood to make arrows.  The use of bows and arrows is to keep the
+Empire in awe."
+
+11.  The son and successor of Ho Lu.  He was finally defeated and
+overthrown by Kou Chien, King of Yueh, in 473 B.C.  See post.
+
+12.  King Yen of Hsu, a fabulous being, of whom Sun Hsing-yen
+says in his preface:  "His humanity brought him to destruction."
+
+13.  The passage I have put in brackets is omitted in the T`U
+SHU, and may be an interpolation.  It was known, however, to Chang
+Shou-chieh of the T`ang dynasty, and appears in the T`AI P`ING YU
+LAN.
+
+14.  Ts`ao Kung seems to be thinking of the first part of chap.
+II, perhaps especially of ss. 8.
+
+15.  See chap. XI.
+
+16.  On the other hand, it is noteworthy that WU TZU, which is
+now in 6 chapters, has 48 assigned to it in the HAN CHIH.
+Likewise, the CHUNG YUNG is credited with 49 chapters, though it
+now exists in one only.  In the case of very short works, one is
+tempted to think that P`IEN might simply mean "leaves."
+
+17.  Yeh Shih of the Sung dynasty [1151-1223].
+
+18.  He hardly deserves to be bracketed with assassins.
+
+19.  See Chapter 7, ss. 27 and Chapter 11, ss. 28.
+
+20.  See Chapter 11, ss. 28.  Chuan Chu is the abbreviated form
+of his name.
+
+21.  I.e. Po P`ei.  See ante.
+
+22.  The nucleus of this work is probably genuine, though large
+additions have been made by later hands.  Kuan Chung died in 645
+B.C.
+
+23.  See infra, beginning of INTRODUCTION.
+
+24.  I do not know what this work is, unless it be the last
+chapter of another work.  Why that chapter should be singled out,
+however, is not clear.
+
+25.  About 480 B.C.
+
+26.  That is, I suppose, the age of Wu Wang and Chou Kung.
+
+27.  In the 3rd century B.C.
+
+28.  Ssu-ma Jang-chu, whose family name was T`ien, lived in the
+latter half of the 6th century B.C., and is also believed to have
+written a work on war.  See SHIH CHI, ch. 64, and infra at the
+beginning of the INTRODUCTION.
+
+29.  See Legge's Classics, vol. V, Prolegomena p. 27.  Legge
+thinks that the TSO CHUAN must have been written in the 5th
+century, but not before 424 B.C.
+
+30.  See MENCIUS III. 1. iii. 13-20.
+
+31.  When Wu first appears in the CH`UN CH`IU in 584, it is
+already at variance with its powerful neighbor.  The CH`UN CH`IU
+first mentions Yueh in 537, the TSO CHUAN in 601.
+
+32.  This is explicitly stated in the TSO CHUAN, XXXII, 2.
+
+33.  There is this to be said for the later period, that the feud
+would tend to grow more bitter after each encounter, and thus
+more fully justify the language used in XI. ss. 30.
+
+34.  With Wu Yuan himself the case is just the reverse:  -- a
+spurious treatise on war has been fathered on him simply because
+he was a great general.  Here we have an obvious inducement to
+forgery.  Sun Wu, on the other hand, cannot have been widely
+known to fame in the 5th century.
+
+35.  From TSO CHUAN:  "From the date of King Chao's accession
+[515] there was no year in which Ch`u was not attacked by Wu."
+
+36.  Preface ad fin:  "My family comes from Lo-an, and we are
+really descended from Sun Tzu.  I am ashamed to say that I only
+read my ancestor's work from a literary point of view, without
+comprehending the military technique.  So long have we been
+enjoying the blessings of peace!"
+
+37.  Hua-yin is about 14 miles from T`ung-kuan on the eastern
+border of Shensi.  The temple in question is still visited by
+those about to make the ascent of the Western Sacred Mountain.
+It is
+mentioned in a text as being "situated five LI east of the
+district city of Hua-yin.  The temple contains the Hua-shan
+tablet inscribed by the T`ang Emperor Hsuan Tsung [713-755]."
+
+38.  See my "Catalogue of Chinese Books" (Luzac & Co., 1908), no.
+40.
+
+39.  This is a discussion of 29 difficult passages in Sun Tzu.
+
+40.  Cf.  Catalogue of the library of Fan family at Ningpo:  "His
+commentary is frequently obscure; it furnishes a clue, but does
+not fully develop the meaning."
+
+41.  WEN HSIEN T`UNG K`AO, ch. 221.
+
+42.  It is interesting to note that M. Pelliot has recently
+discovered chapters 1, 4 and 5 of this lost work in the "Grottos
+of the Thousand Buddhas."  See B.E.F.E.O., t. VIII, nos. 3-4, p.
+525.
+
+43.  The Hsia, the Shang and the Chou.  Although the last-named
+was nominally existent in Sun Tzu's day, it retained hardly a
+vestige of power, and the old military organization had
+practically gone by the board.  I can suggest no other
+explanation of the passage.
+
+44.  See CHOU LI, xxix. 6-10.
+
+45.  T`UNG K`AO, ch. 221.
+
+46.  This appears to be still extant.  See Wylie's "Notes," p. 91
+(new edition).
+
+47.  T`UNG K`AO, loc. cit.
+
+48.  A notable person in his day.  His biography is given in the
+SAN KUO CHIH, ch. 10.
+
+49.  See XI. ss. 58, note.
+
+50.  HOU HAN SHU, ch. 17 ad init.
+
+51.  SAN KUO CHIH, ch. 54.
+
+52.  SUNG SHIH, ch. 365 ad init.
+
+53.  The few Europeans who have yet had an opportunity of
+acquainting themselves with Sun Tzu are not behindhand in their
+praise.  In this connection, I may perhaps be excused for quoting
+from a letter from Lord Roberts, to whom the sheets of the
+present work were submitted previous to publication:  "Many of
+Sun Wu's maxims are perfectly applicable to the present day, and
+no. 11 [in Chapter VIII] is one that the people of this country
+would do well to take to heart."
+
+54.  Ch. 140.
+
+55.  See IV. ss. 3.
+
+56.  The allusion may be to Mencius VI. 2. ix. 2.
+
+57.  The TSO CHUAN.
+
+58.  SHIH CHI, ch. 25, fol. I.
+
+59.  Cf. SHIH CHI, ch. 47.
+
+60.  See SHU CHING, preface ss. 55.
+
+61.  See SHIH CHI, ch. 47.
+
+62.  Lun Yu, XV. 1.
+
+63.  I failed to trace this utterance.
+
+64.  Supra.
+
+65.  Supra.
+
+66.  The other four being worship, mourning, entertainment of
+guests, and festive rites.  See SHU CHING, ii. 1. III. 8, and
+CHOU LI, IX. fol. 49.
+
+67.  See XIII. ss. 11, note.
+
+68.  This is a rather obscure allusion to the TSO CHUAN, where
+Tzu-ch`an says:  "If you have a piece of beautiful brocade, you
+will not employ a mere learner to make it up."
+
+69.  Cf.  TAO TE CHING, ch. 31.
+
+70.  Sun Hsing-yen might have quoted Confucius again.  See LUN
+YU, XIII. 29, 30.
+
+71.  Better known as Hsiang Yu [233-202 B.C.].
+
+72.  SHIH CHI, ch. 47.
+
+73.  SHIH CHI, ch. 38.
+
+74.  See XIII. ss. 27, note.  Further details on T`ai Kung will
+be found in the SHIH CHI, ch. 32 ad init.  Besides the tradition
+which makes him a former minister of Chou Hsin, two other
+accounts of him are there given, according to which he would
+appear to have been first raised from a humble private station by
+Wen Wang.
+
+-----------------------------------------------------------------
+
+I.  LAYING PLANS
+
+     [Ts`ao Kung, in defining the meaning of the Chinese for the
+title of this chapter, says it refers to the deliberations in the
+temple selected by the general for his temporary use, or as we
+should say, in his tent.  See ss. 26.]
+
+     1.  Sun Tzu said:  The art of war is of vital importance to
+the State.
+     2.  It is a matter of life and death, a road either to
+safety or to ruin.  Hence it is a subject of inquiry which can on
+no account be neglected.
+     3.  The art of war, then, is governed by five constant
+factors, to be taken into account in one's deliberations, when
+seeking to determine the conditions obtaining in the field.
+     4.  These are:  (1) The Moral Law; (2) Heaven; (3) Earth;
+(4) The Commander; (5) Method and discipline.
+
+     [It appears from what follows that Sun Tzu means by "Moral
+Law" a principle of harmony, not unlike the Tao of Lao Tzu in its
+moral aspect.  One might be tempted to render it by "morale,"
+were it not considered as an attribute of the ruler in ss. 13.]
+
+     5,  6.  The MORAL LAW causes the people to be in complete
+accord with their ruler, so that they will follow him regardless
+of their lives, undismayed by any danger.
+
+     7.  HEAVEN signifies night and day, cold and heat, times and
+seasons.
+
+     [The commentators, I think, make an unnecessary mystery of
+two words here.  Meng Shih refers to "the hard and the soft,
+waxing and waning" of Heaven.  Wang Hsi, however, may be right in
+saying that what is meant is "the general economy of Heaven,"
+including the five elements, the four seasons, wind and clouds,
+and other phenomena.]
+
+     8.  EARTH comprises distances, great and small; danger and
+security; open ground and narrow passes; the chances of life and
+death.
+     9.  The COMMANDER stands for the virtues of wisdom,
+sincerity, benevolence, courage and strictness.
+
+     [The five cardinal virtues of the Chinese are (1) humanity
+or benevolence; (2) uprightness of mind; (3) self-respect, self-
+control, or "proper feeling;" (4) wisdom; (5) sincerity or good
+faith.  Here "wisdom" and "sincerity" are put before "humanity or
+benevolence," and the two military virtues of "courage" and
+"strictness" substituted for "uprightness of mind" and "self-
+respect, self-control, or 'proper feeling.'"]
+
+     10.  By METHOD AND DISCIPLINE are to be understood the
+marshaling of the army in its proper subdivisions, the
+graduations of rank among the officers, the maintenance of roads
+by which supplies may reach the army, and the control of military
+expenditure.
+     11.  These five heads should be familiar to every general:
+he who knows them will be victorious; he who knows them not will
+fail.
+     12.  Therefore, in your deliberations, when seeking to
+determine the military conditions, let them be made the basis of
+a comparison, in this wise: --
+     13.  (1) Which of the two sovereigns is imbued with the
+Moral Law?
+
+     [I.e., "is in harmony with his subjects."  Cf. ss. 5.]
+
+     (2)  Which of the two generals has most ability?
+     (3)  With whom lie the advantages derived from Heaven and
+Earth?
+
+     [See ss. 7,8]
+
+     (4)  On which side is discipline most rigorously enforced?
+
+     [Tu Mu alludes to the remarkable story of Ts`ao Ts`ao (A.D.
+155-220), who was such a strict disciplinarian that once, in
+accordance with his own severe regulations against injury to
+standing crops, he condemned himself to death for having allowed
+his horse to shy into a field of corn!  However, in lieu of
+losing his head, he was persuaded to satisfy his sense of justice
+by cutting off his hair.  Ts`ao Ts`ao's own comment on the
+present passage is characteristically curt:  "when you lay down a
+law, see that it is not disobeyed; if it is disobeyed the
+offender must be put to death."]
+
+     (5)  Which army is stronger?
+
+     [Morally as well as physically.  As Mei Yao-ch`en puts it,
+freely rendered, "ESPRIT DE CORPS and 'big battalions.'"]
+
+     (6)  On which side are officers and men more highly trained?
+
+     [Tu Yu quotes Wang Tzu as saying:   "Without constant
+practice,  the officers will be nervous and undecided when
+mustering for battle; without constant practice, the general will
+be wavering and irresolute when the crisis is at hand."]
+
+     (7)  In which army is there the greater constancy both in
+reward and punishment?
+
+     [On which side is there the most absolute certainty that
+merit will be properly rewarded and misdeeds summarily punished?]
+
+     14.  By means of these seven considerations I can forecast
+victory or defeat.
+     15.  The general that hearkens to my counsel and acts upon
+it, will conquer: --let such a one be retained in command!  The
+general that hearkens not to my counsel nor acts upon it, will
+suffer defeat: --let such a one be dismissed!
+
+     [The form of this paragraph reminds us that Sun Tzu's
+treatise was composed expressly for the benefit of his patron Ho
+Lu, king of the Wu State.]
+
+     16.  While heeding the profit of my counsel, avail yourself
+also of any helpful circumstances over and beyond the ordinary
+rules.
+     17.  According as circumstances are favorable, one should
+modify one's plans.
+
+     [Sun Tzu, as a practical soldier, will have none of the
+"bookish theoric."  He cautions us here not to pin our faith to
+abstract principles; "for," as Chang Yu puts it, "while the main
+laws of strategy can be stated clearly enough for the benefit of
+all and sundry, you must be guided by the actions of the enemy in
+attempting to secure a favorable position in actual warfare."  On
+the eve of the battle of Waterloo, Lord Uxbridge, commanding the
+cavalry, went to the Duke of Wellington in order to learn what
+his plans and calculations were for the morrow, because, as he
+explained, he might suddenly find himself Commander-in-chief and
+would be unable to frame new plans in a critical moment.  The
+Duke listened quietly and then said:  "Who will attack the first
+tomorrow -- I or Bonaparte?"  "Bonaparte," replied Lord Uxbridge.
+"Well," continued the Duke, "Bonaparte has not given me any idea
+of his projects; and as my plans will depend upon his, how can
+you expect me to tell you what mine are?" [1] ]
+
+     18.  All warfare is based on deception.
+
+     [The truth of this pithy and profound saying will be
+admitted by every soldier.  Col. Henderson tells us that
+Wellington, great in so many military qualities, was especially
+distinguished by "the extraordinary skill with which he concealed
+his movements and deceived both friend and foe."]
+
+     19.  Hence, when able to attack, we must seem unable; when
+using our forces, we must seem inactive; when we are near, we
+must make the enemy believe we are far away; when far away, we
+must make him believe we are near.
+     20.  Hold out baits to entice the enemy.  Feign disorder,
+and crush him.
+
+     [All commentators, except Chang Yu, say, "When he is in
+disorder, crush him."  It is more natural to suppose that Sun Tzu
+is still illustrating the uses of deception in war.]
+
+     21.  If he is secure at all points, be prepared for him.  If
+he is in superior strength, evade him.
+     22.  If your opponent is of choleric temper, seek to
+irritate him.  Pretend to be weak, that he may grow arrogant.
+
+     [Wang Tzu, quoted by Tu Yu, says that the good tactician
+plays with his adversary as a cat plays with a mouse, first
+feigning weakness and immobility, and then suddenly pouncing upon
+him.]
+
+     23.  If he is taking his ease, give him no rest.
+
+     [This is probably the meaning, though Mei Yao-ch`en has the
+note:  "while we are taking our ease, wait for the enemy to tire
+himself out."  The YU LAN has "Lure him on and tire him out."]
+
+If his forces are united, separate them.
+
+     [Less plausible is the interpretation favored by most of the
+commentators:  "If sovereign and subject are in accord, put
+division between them."]
+
+     24.  Attack him where he is unprepared, appear where you are
+not expected.
+     25.  These military devices, leading to victory, must not be
+divulged beforehand.
+     26.  Now the general who wins a battle makes many
+calculations in his temple ere the battle is fought.
+
+     [Chang Yu tells us that in ancient times it was customary
+for a temple to be set apart for the use of a general who was
+about to take the field, in order that he might there elaborate
+his plan of campaign.]
+
+The general who loses a battle makes but few calculations
+beforehand.  Thus do many calculations lead to victory, and few
+calculations to defeat:  how much more no calculation at all!  It
+is by attention to this point that I can foresee who is likely to
+win or lose.
+
+
+[1]  "Words on Wellington," by Sir. W. Fraser.
+
+-----

<TRUNCATED>

[32/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedPrimarySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedPrimarySelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedPrimarySelfTest.java
new file mode 100644
index 0000000..214c2a8
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedPrimarySelfTest.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
+
+/**
+ * IGFS Hadoop file system IPC shmem self test in PRIMARY mode.
+ */
+public class IgniteHadoopFileSystemShmemEmbeddedPrimarySelfTest
+    extends IgniteHadoopFileSystemShmemAbstractSelfTest {
+    /**
+     * Constructor.
+     */
+    public IgniteHadoopFileSystemShmemEmbeddedPrimarySelfTest() {
+        super(PRIMARY, false);
+    }
+}
\ No newline at end of file
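
The shmem self tests added in this part of the commit all follow one
pattern: each concrete class pins a single combination of IGFS mode and
endpoint flavor in its constructor and inherits all actual test logic from
IgniteHadoopFileSystemShmemAbstractSelfTest.  Judging by the subclasses
(the Embedded variants pass false, the External ones pass true), the parent
presumably reduces to something like the following sketch (assumed shape
only; the real abstract class lives elsewhere in this commit):

    package org.apache.ignite.igfs;

    /** Assumed shape of the shared parent (illustrative sketch, not the actual source). */
    public abstract class IgniteHadoopFileSystemShmemAbstractSelfTestSketch {
        /** IGFS mode the concrete test runs in. */
        private final IgfsMode mode;

        /** Whether the test runs against an external (non-embedded) endpoint. */
        private final boolean external;

        /**
         * @param mode IGFS mode.
         * @param external External endpoint flag.
         */
        protected IgniteHadoopFileSystemShmemAbstractSelfTestSketch(IgfsMode mode, boolean external) {
            this.mode = mode;
            this.external = external;
        }
    }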

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedSecondarySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedSecondarySelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedSecondarySelfTest.java
new file mode 100644
index 0000000..d7f34a1
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedSecondarySelfTest.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.igfs.IgfsMode.PROXY;
+
+/**
+ * IGFS Hadoop file system IPC shmem self test in SECONDARY (PROXY) mode.
+ */
+public class IgniteHadoopFileSystemShmemEmbeddedSecondarySelfTest
+    extends IgniteHadoopFileSystemShmemAbstractSelfTest {
+    /**
+     * Constructor.
+     */
+    public IgniteHadoopFileSystemShmemEmbeddedSecondarySelfTest() {
+        super(PROXY, false);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualAsyncSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualAsyncSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualAsyncSelfTest.java
new file mode 100644
index 0000000..0435eaa
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualAsyncSelfTest.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.igfs.IgfsMode.DUAL_ASYNC;
+
+/**
+ * IGFS Hadoop file system IPC shmem self test in DUAL_ASYNC mode.
+ */
+public class IgniteHadoopFileSystemShmemExternalDualAsyncSelfTest
+    extends IgniteHadoopFileSystemShmemAbstractSelfTest {
+    /**
+     * Constructor.
+     */
+    public IgniteHadoopFileSystemShmemExternalDualAsyncSelfTest() {
+        super(DUAL_ASYNC, true);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualSyncSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualSyncSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualSyncSelfTest.java
new file mode 100644
index 0000000..3af7274
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualSyncSelfTest.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.igfs.IgfsMode.DUAL_SYNC;
+
+/**
+ * IGFS Hadoop file system IPC shmem self test in DUAL_SYNC mode.
+ */
+public class IgniteHadoopFileSystemShmemExternalDualSyncSelfTest
+    extends IgniteHadoopFileSystemShmemAbstractSelfTest {
+    /**
+     * Constructor.
+     */
+    public IgniteHadoopFileSystemShmemExternalDualSyncSelfTest() {
+        super(DUAL_SYNC, true);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalPrimarySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalPrimarySelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalPrimarySelfTest.java
new file mode 100644
index 0000000..ce9dbd9
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalPrimarySelfTest.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
+
+/**
+ * IGFS Hadoop file system IPC shmem self test in PRIMARY mode.
+ */
+public class IgniteHadoopFileSystemShmemExternalPrimarySelfTest
+    extends IgniteHadoopFileSystemShmemAbstractSelfTest {
+    /**
+     * Constructor.
+     */
+    public IgniteHadoopFileSystemShmemExternalPrimarySelfTest() {
+        super(PRIMARY, true);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalSecondarySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalSecondarySelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalSecondarySelfTest.java
new file mode 100644
index 0000000..bc8c182
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalSecondarySelfTest.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.igfs.IgfsMode.PROXY;
+
+/**
+ * IGFS Hadoop file system IPC shmem self test in SECONDARY (PROXY) mode.
+ */
+public class IgniteHadoopFileSystemShmemExternalSecondarySelfTest
+    extends IgniteHadoopFileSystemShmemAbstractSelfTest {
+    /**
+     * Constructor.
+     */
+    public IgniteHadoopFileSystemShmemExternalSecondarySelfTest() {
+        super(PROXY, true);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractMapReduceTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractMapReduceTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractMapReduceTest.java
new file mode 100644
index 0000000..3731213
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractMapReduceTest.java
@@ -0,0 +1,429 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.UUID;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.cache.CacheWriteSynchronizationMode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.FileSystemConfiguration;
+import org.apache.ignite.configuration.HadoopConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.hadoop.fs.IgniteHadoopFileSystemCounterWriter;
+import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
+import org.apache.ignite.igfs.IgfsFile;
+import org.apache.ignite.igfs.IgfsGroupDataBlocksKeyMapper;
+import org.apache.ignite.igfs.IgfsIpcEndpointConfiguration;
+import org.apache.ignite.igfs.IgfsMode;
+import org.apache.ignite.igfs.IgfsPath;
+import org.apache.ignite.igfs.IgfsUserContext;
+import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystem;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopPerformanceCounter;
+import org.apache.ignite.internal.processors.hadoop.examples.HadoopWordCount1;
+import org.apache.ignite.internal.processors.hadoop.examples.HadoopWordCount2;
+import org.apache.ignite.internal.processors.igfs.IgfsEx;
+import org.apache.ignite.internal.processors.igfs.IgfsUtils;
+import org.apache.ignite.internal.util.lang.GridAbsPredicate;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.internal.util.typedef.T2;
+import org.apache.ignite.lang.IgniteOutClosure;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.jetbrains.annotations.Nullable;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.cache.CacheMode.REPLICATED;
+import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.JOB_COUNTER_WRITER_PROPERTY;
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.createJobInfo;
+
+/**
+ * Abstract test of the whole map-reduce processing cycle via the job tracker.
+ */
+public class HadoopAbstractMapReduceTest extends HadoopAbstractWordCountTest {
+    /** IGFS block size. */
+    protected static final int IGFS_BLOCK_SIZE = 512 * 1024;
+
+    /** Amount of blocks to prefetch. */
+    protected static final int PREFETCH_BLOCKS = 1;
+
+    /** Amount of sequential block reads before prefetch is triggered. */
+    protected static final int SEQ_READS_BEFORE_PREFETCH = 2;
+
+    /** Secondary file system URI. */
+    protected static final String SECONDARY_URI = "igfs://igfs-secondary:grid-secondary@127.0.0.1:11500/";
+
+    /** Secondary file system configuration path. */
+    protected static final String SECONDARY_CFG = "modules/core/src/test/config/hadoop/core-site-loopback-secondary.xml";
+
+    /** The user to run the Hadoop job on behalf of. */
+    protected static final String USER = "vasya";
+
+    /** Secondary IGFS name. */
+    protected static final String SECONDARY_IGFS_NAME = "igfs-secondary";
+
+    /** Expected count of the word "red". */
+    protected static final int red = 10_000;
+
+    /** Expected count of the word "blue". */
+    protected static final int blue = 20_000;
+
+    /** Expected count of the word "green". */
+    protected static final int green = 15_000;
+
+    /** Expected count of the word "yellow". */
+    protected static final int yellow = 7_000;
+
+    /** The secondary Ignite node. */
+    protected Ignite igniteSecondary;
+
+    /** The secondary file system. */
+    protected IgfsSecondaryFileSystem secondaryFs;
+
+    /** {@inheritDoc} */
+    @Override protected int gridCount() {
+        return 3;
+    }
+
+    /**
+     * Gets the owner of an IgfsEx path.
+     *
+     * @param i The IGFS instance.
+     * @param p The path.
+     * @return The owner.
+     */
+    private static String getOwner(final IgfsEx i, final IgfsPath p) {
+        return IgfsUserContext.doAs(USER, new IgniteOutClosure<String>() {
+            @Override public String apply() {
+                IgfsFile f = i.info(p);
+
+                assert f != null;
+
+                return f.property(IgfsUtils.PROP_USER_NAME);
+            }
+        });
+    }
+
+    /**
+     * Gets the owner of a secondary file system path.
+     *
+     * @param secFs The secondary file system.
+     * @param p The path.
+     * @return The owner.
+     */
+    private static String getOwnerSecondary(final IgfsSecondaryFileSystem secFs, final IgfsPath p) {
+        return IgfsUserContext.doAs(USER, new IgniteOutClosure<String>() {
+            @Override public String apply() {
+                return secFs.info(p).property(IgfsUtils.PROP_USER_NAME);
+            }
+        });
+    }
+
+    /**
+     * Checks that the owner of the path is the expected user in both
+     * the primary and the secondary file system.
+     *
+     * @param p The path.
+     */
+    private void checkOwner(IgfsPath p) {
+        String ownerPrim = getOwner(igfs, p);
+        assertEquals(USER, ownerPrim);
+
+        String ownerSec = getOwnerSecondary(secondaryFs, p);
+        assertEquals(USER, ownerSec);
+    }
+
+    /**
+     * Does the actual test job.
+     *
+     * @param inFile Input file path.
+     * @param useNewMapper Flag to use new mapper API.
+     * @param useNewCombiner Flag to use new combiner API.
+     * @param useNewReducer Flag to use new reducer API.
+     * @throws Exception If failed.
+     */
+    protected final void doTest(IgfsPath inFile, boolean useNewMapper, boolean useNewCombiner, boolean useNewReducer)
+        throws Exception {
+        igfs.delete(new IgfsPath(PATH_OUTPUT), true);
+
+        JobConf jobConf = new JobConf();
+
+        jobConf.set(JOB_COUNTER_WRITER_PROPERTY, IgniteHadoopFileSystemCounterWriter.class.getName());
+        jobConf.setUser(USER);
+        jobConf.set(IgniteHadoopFileSystemCounterWriter.COUNTER_WRITER_DIR_PROPERTY, "/xxx/${USER}/zzz");
+
+        // Split the input into about 40 items for the v2 API.
+        jobConf.setInt(FileInputFormat.SPLIT_MAXSIZE, 65000);
+
+        // Same for the v1 API.
+        jobConf.setInt("fs.local.block.size", 65000);
+
+        // File system coordinates.
+        setupFileSystems(jobConf);
+
+        HadoopWordCount1.setTasksClasses(jobConf, !useNewMapper, !useNewCombiner, !useNewReducer);
+
+        Job job = Job.getInstance(jobConf);
+
+        HadoopWordCount2.setTasksClasses(job, useNewMapper, useNewCombiner, useNewReducer, compressOutputSnappy());
+
+        job.setOutputKeyClass(Text.class);
+        job.setOutputValueClass(IntWritable.class);
+
+        FileInputFormat.setInputPaths(job, new Path(igfsScheme() + inFile.toString()));
+        FileOutputFormat.setOutputPath(job, new Path(igfsScheme() + PATH_OUTPUT));
+
+        job.setJarByClass(HadoopWordCount2.class);
+
+        HadoopJobId jobId = new HadoopJobId(UUID.randomUUID(), 1);
+
+        IgniteInternalFuture<?> fut = grid(0).hadoop().submit(jobId, createJobInfo(job.getConfiguration()));
+
+        fut.get();
+
+        checkJobStatistics(jobId);
+
+        final String outFile = PATH_OUTPUT + "/" + (useNewReducer ? "part-r-" : "part-") + "00000";
+
+        checkOwner(new IgfsPath(PATH_OUTPUT + "/" + "_SUCCESS"));
+
+        checkOwner(new IgfsPath(outFile));
+
+        String actual = readAndSortFile(outFile, job.getConfiguration());
+
+        assertEquals("Use new mapper: " + useNewMapper + ", new combiner: " + useNewCombiner + ", new reducer: " +
+                useNewReducer,
+            "blue\t" + blue + "\n" +
+                "green\t" + green + "\n" +
+                "red\t" + red + "\n" +
+                "yellow\t" + yellow + "\n",
+            actual
+        );
+    }
+
+    /**
+     * Tells whether to compress output data with Snappy.
+     *
+     * @return {@code True} if output data should be compressed with Snappy.
+     */
+    protected boolean compressOutputSnappy() {
+        return false;
+    }
+
+    /**
+     * Performs a simple check of the job statistics.
+     *
+     * @param jobId Job id.
+     * @throws IgniteCheckedException If failed.
+     * @throws IOException On read error.
+     */
+    private void checkJobStatistics(HadoopJobId jobId) throws IgniteCheckedException, IOException {
+        HadoopCounters cntrs = grid(0).hadoop().counters(jobId);
+
+        HadoopPerformanceCounter perfCntr = HadoopPerformanceCounter.getCounter(cntrs, null);
+
+        Map<String, SortedMap<Integer,Long>> tasks = new TreeMap<>();
+
+        Map<String, Integer> phaseOrders = new HashMap<>();
+        phaseOrders.put("submit", 0);
+        phaseOrders.put("prepare", 1);
+        phaseOrders.put("start", 2);
+        phaseOrders.put("Cstart", 3);
+        phaseOrders.put("finish", 4);
+
+        String prevTaskId = null;
+
+        long apiEvtCnt = 0;
+
+        for (T2<String, Long> evt : perfCntr.evts()) {
+            // We expect the string pattern: COMBINE 1 run 7fa86a14-5a08-40e3-a7cb-98109b52a706
+            String[] parsedEvt = evt.get1().split(" ");
+
+            String taskId;
+            String taskPhase;
+
+            if ("JOB".equals(parsedEvt[0])) {
+                taskId = parsedEvt[0];
+                taskPhase = parsedEvt[1];
+            }
+            else {
+                taskId = ("COMBINE".equals(parsedEvt[0]) ? "MAP" : parsedEvt[0].substring(0, 3)) + parsedEvt[1];
+                taskPhase = ("COMBINE".equals(parsedEvt[0]) ? "C" : "") + parsedEvt[2];
+            }
+
+            if (!taskId.equals(prevTaskId))
+                tasks.put(taskId, new TreeMap<Integer,Long>());
+
+            Integer pos = phaseOrders.get(taskPhase);
+
+            assertNotNull("Invalid phase " + taskPhase, pos);
+
+            tasks.get(taskId).put(pos, evt.get2());
+
+            prevTaskId = taskId;
+
+            apiEvtCnt++;
+        }
+
+        for (Map.Entry<String, SortedMap<Integer, Long>> task : tasks.entrySet()) {
+            Map<Integer, Long> order = task.getValue();
+
+            long prev = 0;
+
+            for (Map.Entry<Integer, Long> phase : order.entrySet()) {
+                assertTrue("Phase order of " + task.getKey() + " is invalid", phase.getValue() >= prev);
+
+                prev = phase.getValue();
+            }
+        }
+
+        final IgfsPath statPath = new IgfsPath("/xxx/" + USER + "/zzz/" + jobId + "/performance");
+
+        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
+            @Override public boolean apply() {
+                return igfs.exists(statPath);
+            }
+        }, 20_000);
+
+        final long apiEvtCnt0 = apiEvtCnt;
+
+        boolean res = GridTestUtils.waitForCondition(new GridAbsPredicate() {
+            @Override public boolean apply() {
+                try {
+                    try (BufferedReader reader = new BufferedReader(new InputStreamReader(igfs.open(statPath)))) {
+                        return apiEvtCnt0 == HadoopTestUtils.simpleCheckJobStatFile(reader);
+                    }
+                }
+                catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
+            }
+        }, 10000);
+
+        if (!res) {
+            BufferedReader reader = new BufferedReader(new InputStreamReader(igfs.open(statPath)));
+
+            assert false : "Invalid API events count [exp=" + apiEvtCnt0 +
+                ", actual=" + HadoopTestUtils.simpleCheckJobStatFile(reader) + ']';
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        igniteSecondary = startGridWithIgfs("grid-secondary", SECONDARY_IGFS_NAME, PRIMARY, null, SECONDARY_REST_CFG);
+
+        super.beforeTest();
+    }
+
+    /**
+     * Start grid with IGFS.
+     *
+     * @param gridName Grid name.
+     * @param igfsName IGFS name.
+     * @param mode IGFS mode.
+     * @param secondaryFs Secondary file system (optional).
+     * @param restCfg Rest configuration string (optional).
+     * @return Started grid instance.
+     * @throws Exception If failed.
+     */
+    protected Ignite startGridWithIgfs(String gridName, String igfsName, IgfsMode mode,
+        @Nullable IgfsSecondaryFileSystem secondaryFs, @Nullable IgfsIpcEndpointConfiguration restCfg) throws Exception {
+        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
+
+        igfsCfg.setDataCacheName("dataCache");
+        igfsCfg.setMetaCacheName("metaCache");
+        igfsCfg.setName(igfsName);
+        igfsCfg.setBlockSize(IGFS_BLOCK_SIZE);
+        igfsCfg.setDefaultMode(mode);
+        igfsCfg.setIpcEndpointConfiguration(restCfg);
+        igfsCfg.setSecondaryFileSystem(secondaryFs);
+        igfsCfg.setPrefetchBlocks(PREFETCH_BLOCKS);
+        igfsCfg.setSequentialReadsBeforePrefetch(SEQ_READS_BEFORE_PREFETCH);
+
+        CacheConfiguration dataCacheCfg = defaultCacheConfiguration();
+
+        dataCacheCfg.setName("dataCache");
+        dataCacheCfg.setCacheMode(PARTITIONED);
+        dataCacheCfg.setNearConfiguration(null);
+        dataCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        dataCacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(2));
+        dataCacheCfg.setBackups(0);
+        dataCacheCfg.setAtomicityMode(TRANSACTIONAL);
+        dataCacheCfg.setOffHeapMaxMemory(0);
+
+        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
+
+        metaCacheCfg.setName("metaCache");
+        metaCacheCfg.setCacheMode(REPLICATED);
+        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        IgniteConfiguration cfg = new IgniteConfiguration();
+
+        cfg.setGridName(gridName);
+
+        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+
+        discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
+
+        cfg.setDiscoverySpi(discoSpi);
+        cfg.setCacheConfiguration(dataCacheCfg, metaCacheCfg);
+        cfg.setFileSystemConfiguration(igfsCfg);
+
+        cfg.setLocalHost("127.0.0.1");
+        cfg.setConnectorConfiguration(null);
+
+        HadoopConfiguration hadoopCfg = createHadoopConfiguration();
+
+        if (hadoopCfg != null)
+            cfg.setHadoopConfiguration(hadoopCfg);
+
+        return G.start(cfg);
+    }
+
+    /**
+     * Creates custom Hadoop configuration.
+     *
+     * @return The Hadoop configuration.
+     */
+    protected HadoopConfiguration createHadoopConfiguration() {
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public FileSystemConfiguration igfsConfiguration() throws Exception {
+        FileSystemConfiguration fsCfg = super.igfsConfiguration();
+
+        secondaryFs = new IgniteHadoopIgfsSecondaryFileSystem(SECONDARY_URI, SECONDARY_CFG);
+
+        fsCfg.setSecondaryFileSystem(secondaryFs);
+
+        return fsCfg;
+    }
+}
\ No newline at end of file
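
HadoopAbstractMapReduceTest leaves the actual test method to subclasses: a
subclass generates an input file whose word counts match the red/blue/green/
yellow constants and then invokes doTest(...) for whichever mapper/combiner/
reducer API permutations it cares about.  A minimal sketch of such a subclass
(the class and method names here are invented for illustration;
generateTestFile() and PATH_INPUT come from HadoopAbstractWordCountTest,
shown further below):

    package org.apache.ignite.internal.processors.hadoop;

    import org.apache.ignite.igfs.IgfsPath;
    import org.apache.ignite.internal.processors.hadoop.examples.HadoopWordCount2;

    /** Illustrative concrete map-reduce test (sketch, not part of this commit). */
    public class HadoopMapReduceSketchTest extends HadoopAbstractMapReduceTest {
        /**
         * @throws Exception If failed.
         */
        public void testWholeMapReduceExecution() throws Exception {
            IgfsPath inDir = new IgfsPath(PATH_INPUT);

            igfs.mkdirs(inDir);

            IgfsPath inFile = new IgfsPath(inDir, HadoopWordCount2.class.getSimpleName() + "-input");

            // Word counts must match the constants doTest() asserts against.
            generateTestFile(inFile.toString(), "red", red, "blue", blue,
                "green", green, "yellow", yellow);

            // Old (mapred) API first, then the new (mapreduce) API; doTest()
            // clears the output directory on each invocation.
            doTest(inFile, false, false, false);
            doTest(inFile, true, true, true);
        }
    }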

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractSelfTest.java
new file mode 100644
index 0000000..fb16988
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractSelfTest.java
@@ -0,0 +1,239 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.io.File;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.ConnectorConfiguration;
+import org.apache.ignite.configuration.FileSystemConfiguration;
+import org.apache.ignite.configuration.HadoopConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem;
+import org.apache.ignite.igfs.IgfsGroupDataBlocksKeyMapper;
+import org.apache.ignite.igfs.IgfsIpcEndpointConfiguration;
+import org.apache.ignite.igfs.IgfsIpcEndpointType;
+import org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemsUtils;
+import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.cache.CacheMode.REPLICATED;
+import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
+
+/**
+ * Abstract class for Hadoop tests.
+ */
+public abstract class HadoopAbstractSelfTest extends GridCommonAbstractTest {
+    /** Shared IP finder. */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** REST port. */
+    protected static final int REST_PORT = 11212;
+
+    /** IGFS name. */
+    protected static final String igfsName = null;
+
+    /** IGFS meta cache name. */
+    protected static final String igfsMetaCacheName = "meta";
+
+    /** IGFS data cache name. */
+    protected static final String igfsDataCacheName = "data";
+
+    /** IGFS block size. */
+    protected static final int igfsBlockSize = 1024;
+
+    /** IGFS block group size. */
+    protected static final int igfsBlockGroupSize = 8;
+
+    /** Initial REST port. */
+    private int restPort = REST_PORT;
+
+    /** Secondary file system REST endpoint configuration. */
+    protected static final IgfsIpcEndpointConfiguration SECONDARY_REST_CFG;
+
+    static {
+        SECONDARY_REST_CFG = new IgfsIpcEndpointConfiguration();
+
+        SECONDARY_REST_CFG.setType(IgfsIpcEndpointType.TCP);
+        SECONDARY_REST_CFG.setPort(11500);
+    }
+
+    /** Initial classpath. */
+    private static String initCp;
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        // Add surefire classpath to regular classpath.
+        initCp = System.getProperty("java.class.path");
+
+        String surefireCp = System.getProperty("surefire.test.class.path");
+
+        if (surefireCp != null)
+            System.setProperty("java.class.path", initCp + File.pathSeparatorChar + surefireCp);
+
+        super.beforeTestsStarted();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        super.afterTestsStopped();
+
+        // Restore classpath.
+        System.setProperty("java.class.path", initCp);
+
+        initCp = null;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        cfg.setHadoopConfiguration(hadoopConfiguration(gridName));
+
+        TcpCommunicationSpi commSpi = new TcpCommunicationSpi();
+
+        commSpi.setSharedMemoryPort(-1);
+
+        cfg.setCommunicationSpi(commSpi);
+
+        TcpDiscoverySpi discoSpi = (TcpDiscoverySpi)cfg.getDiscoverySpi();
+
+        discoSpi.setIpFinder(IP_FINDER);
+
+        if (igfsEnabled()) {
+            cfg.setCacheConfiguration(metaCacheConfiguration(), dataCacheConfiguration());
+
+            cfg.setFileSystemConfiguration(igfsConfiguration());
+        }
+
+        if (restEnabled()) {
+            ConnectorConfiguration clnCfg = new ConnectorConfiguration();
+
+            clnCfg.setPort(restPort++);
+
+            cfg.setConnectorConfiguration(clnCfg);
+        }
+
+        cfg.setLocalHost("127.0.0.1");
+        cfg.setPeerClassLoadingEnabled(false);
+
+        return cfg;
+    }
+
+    /**
+     * @param gridName Grid name.
+     * @return Hadoop configuration.
+     */
+    public HadoopConfiguration hadoopConfiguration(String gridName) {
+        HadoopConfiguration cfg = new HadoopConfiguration();
+
+        cfg.setMaxParallelTasks(3);
+
+        return cfg;
+    }
+
+    /**
+     * @return IGFS configuration.
+     * @throws Exception If failed.
+     */
+    public FileSystemConfiguration igfsConfiguration() throws Exception {
+        FileSystemConfiguration cfg = new FileSystemConfiguration();
+
+        cfg.setName(igfsName);
+        cfg.setBlockSize(igfsBlockSize);
+        cfg.setDataCacheName(igfsDataCacheName);
+        cfg.setMetaCacheName(igfsMetaCacheName);
+        cfg.setFragmentizerEnabled(false);
+
+        return cfg;
+    }
+
+    /**
+     * @return IGFS meta cache configuration.
+     */
+    public CacheConfiguration metaCacheConfiguration() {
+        CacheConfiguration cfg = new CacheConfiguration();
+
+        cfg.setName(igfsMetaCacheName);
+        cfg.setCacheMode(REPLICATED);
+        cfg.setAtomicityMode(TRANSACTIONAL);
+        cfg.setWriteSynchronizationMode(FULL_SYNC);
+
+        return cfg;
+    }
+
+    /**
+     * @return IGFS data cache configuration.
+     */
+    private CacheConfiguration dataCacheConfiguration() {
+        CacheConfiguration cfg = new CacheConfiguration();
+
+        cfg.setName(igfsDataCacheName);
+        cfg.setCacheMode(PARTITIONED);
+        cfg.setAtomicityMode(TRANSACTIONAL);
+        cfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(igfsBlockGroupSize));
+        cfg.setWriteSynchronizationMode(FULL_SYNC);
+
+        return cfg;
+    }
+
+    /**
+     * @return {@code True} if IGFS is enabled on Hadoop nodes.
+     */
+    protected boolean igfsEnabled() {
+        return false;
+    }
+
+    /**
+     * @return {@code True} if REST is enabled on Hadoop nodes.
+     */
+    protected boolean restEnabled() {
+        return false;
+    }
+
+    /**
+     * @return Number of nodes to start.
+     */
+    protected int gridCount() {
+        return 3;
+    }
+
+    /**
+     * @param cfg Config.
+     */
+    protected void setupFileSystems(Configuration cfg) {
+        cfg.set("fs.defaultFS", igfsScheme());
+        cfg.set("fs.igfs.impl", org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.class.getName());
+        cfg.set("fs.AbstractFileSystem.igfs.impl", IgniteHadoopFileSystem.
+            class.getName());
+
+        HadoopFileSystemsUtils.setupFileSystems(cfg);
+    }
+
+    /**
+     * @return IGFS scheme for test.
+     */
+    protected String igfsScheme() {
+        return "igfs://:" + getTestGridName(0) + "@/";
+    }
+}
\ No newline at end of file
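
The protected toggles above (igfsEnabled(), restEnabled(), gridCount()) are
all a subclass has to override to change the test topology.  For instance, a
hypothetical test that wants a single node with both IGFS and the REST
connector enabled might look like this (sketch only; the name is invented):

    package org.apache.ignite.internal.processors.hadoop;

    /** Illustrative subclass (sketch, not part of this commit). */
    public class HadoopIgfsRestSketchTest extends HadoopAbstractSelfTest {
        /** {@inheritDoc} */
        @Override protected boolean igfsEnabled() {
            return true; // Adds the meta/data caches and the IGFS configuration to each node.
        }

        /** {@inheritDoc} */
        @Override protected boolean restEnabled() {
            return true; // Each node gets a ConnectorConfiguration on ports 11212, 11213, ...
        }

        /** {@inheritDoc} */
        @Override protected int gridCount() {
            return 1;
        }
    }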

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractWordCountTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractWordCountTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractWordCountTest.java
new file mode 100644
index 0000000..e45c127
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractWordCountTest.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import com.google.common.base.Joiner;
+import java.io.BufferedReader;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.ignite.igfs.IgfsPath;
+import org.apache.ignite.internal.processors.igfs.IgfsEx;
+
+/**
+ * Abstract class for tests based on WordCount test job.
+ */
+public abstract class HadoopAbstractWordCountTest extends HadoopAbstractSelfTest {
+    /** Input path. */
+    protected static final String PATH_INPUT = "/input";
+
+    /** Output path. */
+    protected static final String PATH_OUTPUT = "/output";
+
+    /** IGFS instance. */
+    protected IgfsEx igfs;
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        super.beforeTestsStarted();
+
+        Configuration cfg = new Configuration();
+
+        setupFileSystems(cfg);
+
+        // Init the cache with the correct LocalFileSystem implementation.
+        FileSystem.getLocal(cfg);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        igfs = (IgfsEx)startGrids(gridCount()).fileSystem(igfsName);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids(true);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean igfsEnabled() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected int gridCount() {
+        return 1;
+    }
+
+    /**
+     * Generates test file.
+     *
+     * @param path File name.
+     * @param wordCounts Alternating words and counts: word, count, word, count, ...
+     * @throws Exception If failed.
+     */
+    protected void generateTestFile(String path, Object... wordCounts) throws Exception {
+        List<String> wordsArr = new ArrayList<>();
+
+        // Generate the words array.
+        for (int i = 0; i < wordCounts.length; i += 2) {
+            String word = (String) wordCounts[i];
+            int cnt = (Integer) wordCounts[i + 1];
+
+            while (cnt-- > 0)
+                wordsArr.add(word);
+        }
+
+        // Shuffle the words.
+        for (int i = 0; i < wordsArr.size(); i++) {
+            int j = (int)(Math.random() * wordsArr.size());
+
+            Collections.swap(wordsArr, i, j);
+        }
+
+        // Prepare the input file.
+        PrintWriter testInputFileWriter = new PrintWriter(igfs.create(new IgfsPath(path), true));
+
+        int j = 0;
+
+        while (j < wordsArr.size()) {
+            int i = 5 + (int)(Math.random() * 5);
+
+            List<String> subList = wordsArr.subList(j, Math.min(j + i, wordsArr.size()));
+            j += i;
+
+            testInputFileWriter.println(Joiner.on(' ').join(subList));
+        }
+
+        testInputFileWriter.close();
+    }
+
+    /**
+     * Read w/o decoding (default).
+     *
+     * @param fileName The file.
+     * @return The file contents, human-readable.
+     * @throws Exception On error.
+     */
+    protected String readAndSortFile(String fileName) throws Exception {
+        return readAndSortFile(fileName, null);
+    }
+
+    /**
+     * Reads the whole file into a sorted String.
+     *
+     * @param fileName Name of the file to read.
+     * @param conf Job configuration, used to detect Snappy-compressed output; may be {@code null}.
+     * @return Sorted content of the file as a String value.
+     * @throws Exception If could not read the file.
+     */
+    protected String readAndSortFile(String fileName, Configuration conf) throws Exception {
+        final List<String> list = new ArrayList<>();
+
+        final boolean snappyDecode = conf != null && conf.getBoolean(FileOutputFormat.COMPRESS, false);
+
+        if (snappyDecode) {
+            try (SequenceFile.Reader reader = new SequenceFile.Reader(conf,
+                    SequenceFile.Reader.file(new Path(fileName)))) {
+                Text key = new Text();
+
+                IntWritable val = new IntWritable();
+
+                while (reader.next(key, val))
+                    list.add(key + "\t" + val);
+            }
+        }
+        else {
+            try (InputStream is0 = igfs.open(new IgfsPath(fileName))) {
+                BufferedReader reader = new BufferedReader(new InputStreamReader(is0));
+
+                String line;
+
+                while ((line = reader.readLine()) != null)
+                    list.add(line);
+            }
+        }
+
+        Collections.sort(list);
+
+        return Joiner.on('\n').join(list) + "\n";
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoaderTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoaderTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoaderTest.java
new file mode 100644
index 0000000..2fd7777
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoaderTest.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import javax.security.auth.AuthPermission;
+import junit.framework.TestCase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.ignite.internal.processors.hadoop.deps.CircularWIthHadoop;
+import org.apache.ignite.internal.processors.hadoop.deps.CircularWithoutHadoop;
+import org.apache.ignite.internal.processors.hadoop.deps.WithIndirectField;
+import org.apache.ignite.internal.processors.hadoop.deps.WithCast;
+import org.apache.ignite.internal.processors.hadoop.deps.WithClassAnnotation;
+import org.apache.ignite.internal.processors.hadoop.deps.WithConstructorInvocation;
+import org.apache.ignite.internal.processors.hadoop.deps.WithMethodCheckedException;
+import org.apache.ignite.internal.processors.hadoop.deps.WithMethodRuntimeException;
+import org.apache.ignite.internal.processors.hadoop.deps.WithExtends;
+import org.apache.ignite.internal.processors.hadoop.deps.WithField;
+import org.apache.ignite.internal.processors.hadoop.deps.WithImplements;
+import org.apache.ignite.internal.processors.hadoop.deps.WithInitializer;
+import org.apache.ignite.internal.processors.hadoop.deps.WithInnerClass;
+import org.apache.ignite.internal.processors.hadoop.deps.WithLocalVariable;
+import org.apache.ignite.internal.processors.hadoop.deps.WithMethodAnnotation;
+import org.apache.ignite.internal.processors.hadoop.deps.WithMethodInvocation;
+import org.apache.ignite.internal.processors.hadoop.deps.WithMethodArgument;
+import org.apache.ignite.internal.processors.hadoop.deps.WithMethodReturnType;
+import org.apache.ignite.internal.processors.hadoop.deps.WithOuterClass;
+import org.apache.ignite.internal.processors.hadoop.deps.WithParameterAnnotation;
+import org.apache.ignite.internal.processors.hadoop.deps.WithStaticField;
+import org.apache.ignite.internal.processors.hadoop.deps.WithStaticInitializer;
+import org.apache.ignite.internal.processors.hadoop.deps.Without;
+
+/**
+ * Tests for Hadoop classloader.
+ */
+public class HadoopClassLoaderTest extends TestCase {
+    /** */
+    final HadoopClassLoader ldr = new HadoopClassLoader(null, "test", null);
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testClassLoading() throws Exception {
+        assertNotSame(CircularWIthHadoop.class, ldr.loadClass(CircularWIthHadoop.class.getName()));
+        assertNotSame(CircularWithoutHadoop.class, ldr.loadClass(CircularWithoutHadoop.class.getName()));
+
+        assertSame(Without.class, ldr.loadClass(Without.class.getName()));
+    }
+
+    /**
+     * Test dependency search.
+     */
+    public void testDependencySearch() {
+        // Positive cases:
+        final Class[] positiveClasses = {
+            Configuration.class,
+            HadoopUtils.class,
+            WithStaticField.class,
+            WithCast.class,
+            WithClassAnnotation.class,
+            WithConstructorInvocation.class,
+            WithMethodCheckedException.class,
+            WithMethodRuntimeException.class,
+            WithExtends.class,
+            WithField.class,
+            WithImplements.class,
+            WithInitializer.class,
+            WithInnerClass.class,
+            WithOuterClass.InnerNoHadoop.class,
+            WithLocalVariable.class,
+            WithMethodAnnotation.class,
+            WithMethodInvocation.class,
+            WithMethodArgument.class,
+            WithMethodReturnType.class,
+            WithParameterAnnotation.class,
+            WithStaticInitializer.class,
+            WithIndirectField.class,
+            CircularWIthHadoop.class,
+            CircularWithoutHadoop.class,
+        };
+
+        for (Class c : positiveClasses)
+            assertTrue(c.getName(), ldr.hasExternalDependencies(c.getName()));
+
+        // Negative cases:
+        final Class[] negativeClasses = {
+            Object.class,
+            AuthPermission.class,
+            Without.class,
+        };
+
+        for (Class c : negativeClasses)
+            assertFalse(c.getName(), ldr.hasExternalDependencies(c.getName()));
+    }
+}
\ No newline at end of file
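A hedged sketch of exercising the same invariants against well-known types instead of the 'deps' fixtures; it assumes the sketch lives in the same package as the test above and uses only calls that appear in it (hasExternalDependencies and loadClass):

import junit.framework.TestCase;

public class HadoopClassLoaderSketch extends TestCase {
    /** */
    private final HadoopClassLoader ldr = new HadoopClassLoader(null, "test", null);

    /**
     * @throws Exception If failed.
     */
    public void testKnownTypes() throws Exception {
        // A Hadoop type must be reported as carrying Hadoop dependencies.
        assertTrue(ldr.hasExternalDependencies("org.apache.hadoop.conf.Configuration"));

        // A plain JDK type must not, and must be served by the parent loader.
        assertFalse(ldr.hasExternalDependencies(Object.class.getName()));
        assertSame(Object.class, ldr.loadClass(Object.class.getName()));
    }
}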

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopCommandLineTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopCommandLineTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopCommandLineTest.java
new file mode 100644
index 0000000..7ee318a
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopCommandLineTest.java
@@ -0,0 +1,474 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import com.google.common.base.Joiner;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileFilter;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.InputStreamReader;
+import java.io.PrintWriter;
+import java.nio.file.Files;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import org.apache.ignite.IgniteSystemProperties;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.hadoop.fs.IgniteHadoopFileSystemCounterWriter;
+import org.apache.ignite.igfs.IgfsInputStream;
+import org.apache.ignite.igfs.IgfsPath;
+import org.apache.ignite.internal.IgnitionEx;
+import org.apache.ignite.internal.processors.hadoop.jobtracker.HadoopJobTracker;
+import org.apache.ignite.internal.processors.igfs.IgfsEx;
+import org.apache.ignite.internal.processors.resource.GridSpringResourceContext;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.jsr166.ConcurrentHashMap8;
+
+/**
+ * Test of integration with Hadoop client via command line interface.
+ */
+public class HadoopCommandLineTest extends GridCommonAbstractTest {
+    /** IGFS instance. */
+    private IgfsEx igfs;
+
+    /** */
+    private static final String igfsName = "igfs";
+
+    /** */
+    private static File testWorkDir;
+
+    /** */
+    private static String hadoopHome;
+
+    /** */
+    private static String hiveHome;
+
+    /** */
+    private static File examplesJar;
+
+    /**
+     * Generates a test file with the given words repeated the given number of times.
+     *
+     * @param path File to write.
+     * @param wordCounts Words and counts.
+     * @throws Exception If failed.
+     */
+    private void generateTestFile(File path, Object... wordCounts) throws Exception {
+        List<String> wordsArr = new ArrayList<>();
+
+        // Generating
+        for (int i = 0; i < wordCounts.length; i += 2) {
+            String word = (String) wordCounts[i];
+            int cnt = (Integer) wordCounts[i + 1];
+
+            while (cnt-- > 0)
+                wordsArr.add(word);
+        }
+
+        // Shuffling
+        for (int i = 0; i < wordsArr.size(); i++) {
+            int j = (int)(Math.random() * wordsArr.size());
+
+            Collections.swap(wordsArr, i, j);
+        }
+
+        // Writing file
+        try (PrintWriter writer = new PrintWriter(path)) {
+            int j = 0;
+
+            while (j < wordsArr.size()) {
+                int i = 5 + (int)(Math.random() * 5);
+
+                List<String> subList = wordsArr.subList(j, Math.min(j + i, wordsArr.size()));
+                j += i;
+
+                writer.println(Joiner.on(' ').join(subList));
+            }
+
+            writer.flush();
+        }
+    }
+
+    /**
+     * Generates two data files to be joined with Hive.
+     *
+     * @throws FileNotFoundException If failed.
+     */
+    private void generateHiveTestFiles() throws FileNotFoundException {
+        try (PrintWriter writerA = new PrintWriter(new File(testWorkDir, "data-a"));
+             PrintWriter writerB = new PrintWriter(new File(testWorkDir, "data-b"))) {
+            char sep = '\t';
+
+            int idB = 0;
+            int idA = 0;
+            int v = 1000;
+
+            for (int i = 0; i < 1000; i++) {
+                writerA.print(idA++);
+                writerA.print(sep);
+                writerA.println(idB);
+
+                writerB.print(idB++);
+                writerB.print(sep);
+                writerB.println(v += 2);
+
+                writerB.print(idB++);
+                writerB.print(sep);
+                writerB.println(v += 2);
+            }
+
+            writerA.flush();
+            writerB.flush();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        super.beforeTestsStarted();
+
+        hiveHome = IgniteSystemProperties.getString("HIVE_HOME");
+
+        assertFalse("HIVE_HOME hasn't been set.", F.isEmpty(hiveHome));
+
+        hadoopHome = IgniteSystemProperties.getString("HADOOP_HOME");
+
+        assertFalse("HADOOP_HOME hasn't been set.", F.isEmpty(hadoopHome));
+
+        String mapredHome = hadoopHome + "/share/hadoop/mapreduce";
+
+        File[] fileList = new File(mapredHome).listFiles(new FileFilter() {
+            @Override public boolean accept(File pathname) {
+                return pathname.getName().startsWith("hadoop-mapreduce-examples-") &&
+                    pathname.getName().endsWith(".jar");
+            }
+        });
+
+        assertEquals("Invalid hadoop distribution.", 1, fileList.length);
+
+        examplesJar = fileList[0];
+
+        testWorkDir = Files.createTempDirectory("hadoop-cli-test").toFile();
+
+        U.copy(resolveHadoopConfig("core-site.ignite.xml"), new File(testWorkDir, "core-site.xml"), false);
+
+        File srcFile = resolveHadoopConfig("mapred-site.ignite.xml");
+        File dstFile = new File(testWorkDir, "mapred-site.xml");
+
+        try (BufferedReader in = new BufferedReader(new FileReader(srcFile));
+             PrintWriter out = new PrintWriter(dstFile)) {
+            String line;
+
+            while ((line = in.readLine()) != null) {
+                if (line.startsWith("</configuration>"))
+                    out.println(
+                        "    <property>\n" +
+                        "        <name>" + HadoopUtils.JOB_COUNTER_WRITER_PROPERTY + "</name>\n" +
+                        "        <value>" + IgniteHadoopFileSystemCounterWriter.class.getName() + "</value>\n" +
+                        "    </property>\n");
+
+                out.println(line);
+            }
+
+            out.flush();
+        }
+
+        generateTestFile(new File(testWorkDir, "test-data"), "red", 100, "green", 200, "blue", 150, "yellow", 50);
+
+        generateHiveTestFiles();
+    }
+
+    /**
+     * Resolve Hadoop configuration file.
+     *
+     * @param name File name.
+     * @return Resolved file.
+     */
+    private static File resolveHadoopConfig(String name) {
+        File path = U.resolveIgnitePath("modules/hadoop/config/" + name);
+
+        return path != null ? path : U.resolveIgnitePath("config/hadoop/" + name);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        super.afterTestsStopped();
+
+        U.delete(testWorkDir);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        String cfgPath = "config/hadoop/default-config.xml";
+
+        IgniteBiTuple<IgniteConfiguration, GridSpringResourceContext> tup = IgnitionEx.loadConfiguration(cfgPath);
+
+        IgniteConfiguration cfg = tup.get1();
+
+        cfg.setLocalHost("127.0.0.1"); // Avoid connecting to other nodes.
+
+        igfs = (IgfsEx) Ignition.start(cfg).fileSystem(igfsName);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids(true);
+    }
+
+    /**
+     * Creates a process builder with the appropriate environment to run the Hadoop CLI.
+     *
+     * @return Process builder.
+     */
+    private ProcessBuilder createProcessBuilder() {
+        String sep = ":";
+
+        String ggClsPath = HadoopJob.class.getProtectionDomain().getCodeSource().getLocation().getPath() + sep +
+            HadoopJobTracker.class.getProtectionDomain().getCodeSource().getLocation().getPath() + sep +
+            ConcurrentHashMap8.class.getProtectionDomain().getCodeSource().getLocation().getPath();
+
+        ProcessBuilder res = new ProcessBuilder();
+
+        res.environment().put("HADOOP_HOME", hadoopHome);
+        res.environment().put("HADOOP_CLASSPATH", ggClsPath);
+        res.environment().put("HADOOP_CONF_DIR", testWorkDir.getAbsolutePath());
+
+        res.redirectErrorStream(true);
+
+        return res;
+    }
+
+    /**
+     * Waits for process exit and prints its output.
+     *
+     * @param proc Process.
+     * @return Exit code.
+     * @throws Exception If failed.
+     */
+    private int watchProcess(Process proc) throws Exception {
+        BufferedReader reader = new BufferedReader(new InputStreamReader(proc.getInputStream()));
+
+        String line;
+
+        while ((line = reader.readLine()) != null)
+            log().info(line);
+
+        return proc.waitFor();
+    }
+
+    /**
+     * Executes Hadoop command line tool.
+     *
+     * @param args Arguments for Hadoop command line tool.
+     * @return Process exit code.
+     * @throws Exception If failed.
+     */
+    private int executeHadoopCmd(String... args) throws Exception {
+        ProcessBuilder procBuilder = createProcessBuilder();
+
+        List<String> cmd = new ArrayList<>();
+
+        cmd.add(hadoopHome + "/bin/hadoop");
+        cmd.addAll(Arrays.asList(args));
+
+        procBuilder.command(cmd);
+
+        log().info("Execute: " + procBuilder.command());
+
+        return watchProcess(procBuilder.start());
+    }
+
+    /**
+     * Executes Hive query.
+     *
+     * @param qry Query.
+     * @return Process exit code.
+     * @throws Exception If failed.
+     */
+    private int executeHiveQuery(String qry) throws Exception {
+        ProcessBuilder procBuilder = createProcessBuilder();
+
+        List<String> cmd = new ArrayList<>();
+
+        cmd.add(hiveHome + "/bin/hive");
+
+        cmd.add("--hiveconf");
+        cmd.add("hive.rpc.query.plan=true");
+
+        cmd.add("--hiveconf");
+        cmd.add("javax.jdo.option.ConnectionURL=jdbc:derby:" + testWorkDir.getAbsolutePath() + "/metastore_db;" +
+            "databaseName=metastore_db;create=true");
+
+        cmd.add("-e");
+        cmd.add(qry);
+
+        procBuilder.command(cmd);
+
+        log().info("Execute: " + procBuilder.command());
+
+        return watchProcess(procBuilder.start());
+    }
+
+    /**
+     * Tests Hadoop command line integration.
+     *
+     * @throws Exception If failed.
+     */
+    public void testHadoopCommandLine() throws Exception {
+        assertEquals(0, executeHadoopCmd("fs", "-ls", "/"));
+
+        assertEquals(0, executeHadoopCmd("fs", "-mkdir", "/input"));
+
+        assertEquals(0, executeHadoopCmd("fs", "-put", new File(testWorkDir, "test-data").getAbsolutePath(), "/input"));
+
+        assertTrue(igfs.exists(new IgfsPath("/input/test-data")));
+
+        assertEquals(0, executeHadoopCmd("jar", examplesJar.getAbsolutePath(), "wordcount", "/input", "/output"));
+
+        IgfsPath path = new IgfsPath("/user/" + System.getProperty("user.name") + "/");
+
+        assertTrue(igfs.exists(path));
+
+        IgfsPath jobStatPath = null;
+
+        for (IgfsPath jobPath : igfs.listPaths(path)) {
+            assertNull(jobStatPath);
+
+            jobStatPath = jobPath;
+        }
+
+        File locStatFile = new File(testWorkDir, "performance");
+
+        assertEquals(0, executeHadoopCmd("fs", "-get", jobStatPath.toString() + "/performance", locStatFile.toString()));
+
+        long evtCnt = HadoopTestUtils.simpleCheckJobStatFile(new BufferedReader(new FileReader(locStatFile)));
+
+        assertTrue(evtCnt >= 22); //It's the minimum amount of events for job with combiner.
+
+        assertTrue(igfs.exists(new IgfsPath("/output")));
+
+        BufferedReader in = new BufferedReader(new InputStreamReader(igfs.open(new IgfsPath("/output/part-r-00000"))));
+
+        List<String> res = new ArrayList<>();
+
+        String line;
+
+        while ((line = in.readLine()) != null)
+            res.add(line);
+
+        Collections.sort(res);
+
+        assertEquals("[blue\t150, green\t200, red\t100, yellow\t50]", res.toString());
+    }
+
+    /**
+     * Runs a query and checks the result.
+     *
+     * @param expRes Expected result.
+     * @param qry Query.
+     * @throws Exception If failed.
+     */
+    private void checkQuery(String expRes, String qry) throws Exception {
+        assertEquals(0, executeHiveQuery("drop table if exists result"));
+
+        assertEquals(0, executeHiveQuery(
+            "create table result " +
+            "row format delimited fields terminated by ' ' " +
+            "stored as textfile " +
+            "location '/result' as " + qry
+        ));
+
+        IgfsInputStream in = igfs.open(new IgfsPath("/result/000000_0"));
+
+        byte[] buf = new byte[(int) in.length()];
+
+        int off = 0;
+
+        // A single read() is not guaranteed to fill the buffer, so loop until it is full or EOF is hit.
+        for (int n; off < buf.length && (n = in.read(buf, off, buf.length - off)) != -1;)
+            off += n;
+
+        assertEquals(expRes, new String(buf));
+    }
+
+    /**
+     * Tests Hive integration.
+     *
+     * @throws Exception If failed.
+     */
+    public void testHiveCommandLine() throws Exception {
+        assertEquals(0, executeHiveQuery(
+            "create table table_a (" +
+                "id_a int," +
+                "id_b int" +
+            ") " +
+            "row format delimited fields terminated by '\\t'" +
+            "stored as textfile " +
+            "location '/table-a'"
+        ));
+
+        assertEquals(0, executeHadoopCmd("fs", "-put", new File(testWorkDir, "data-a").getAbsolutePath(), "/table-a"));
+
+        assertEquals(0, executeHiveQuery(
+            "create table table_b (" +
+                "id_b int," +
+                "rndv int" +
+            ") " +
+            "row format delimited fields terminated by '\\t'" +
+            "stored as textfile " +
+            "location '/table-b'"
+        ));
+
+        assertEquals(0, executeHadoopCmd("fs", "-put", new File(testWorkDir, "data-b").getAbsolutePath(), "/table-b"));
+
+        checkQuery(
+            "0 0\n" +
+            "1 2\n" +
+            "2 4\n" +
+            "3 6\n" +
+            "4 8\n" +
+            "5 10\n" +
+            "6 12\n" +
+            "7 14\n" +
+            "8 16\n" +
+            "9 18\n",
+            "select * from table_a order by id_a limit 10"
+        );
+
+        checkQuery("2000\n", "select count(id_b) from table_b");
+
+        checkQuery(
+            "250 500 2002\n" +
+            "251 502 2006\n" +
+            "252 504 2010\n" +
+            "253 506 2014\n" +
+            "254 508 2018\n" +
+            "255 510 2022\n" +
+            "256 512 2026\n" +
+            "257 514 2030\n" +
+            "258 516 2034\n" +
+            "259 518 2038\n",
+            "select a.id_a, a.id_b, b.rndv" +
+            " from table_a a" +
+            " inner join table_b b on a.id_b = b.id_b" +
+            " where b.rndv > 2000" +
+            " order by a.id_a limit 10"
+        );
+
+        checkQuery("1000\n", "select count(b.id_b) from table_a a inner join table_b b on a.id_b = b.id_b");
+    }
+}
\ No newline at end of file
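watchProcess() and executeHadoopCmd() above reduce to one reusable run-and-stream pattern: build the command, merge stderr into stdout, echo every line, and return the exit code. A self-contained sketch under those assumptions (the CmdRunner class and the sample 'hadoop fs -ls /' call are illustrative and assume the binary is on PATH):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.Arrays;

public class CmdRunner {
    /** Runs a command, echoes its combined output, and returns the exit code. */
    static int run(String... cmd) throws Exception {
        ProcessBuilder pb = new ProcessBuilder(Arrays.asList(cmd));

        pb.redirectErrorStream(true); // Merge stderr into stdout, as the test does.

        Process proc = pb.start();

        try (BufferedReader reader = new BufferedReader(new InputStreamReader(proc.getInputStream()))) {
            String line;

            while ((line = reader.readLine()) != null)
                System.out.println(line);
        }

        return proc.waitFor();
    }

    public static void main(String[] args) throws Exception {
        System.exit(run("hadoop", "fs", "-ls", "/"));
    }
}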


http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopProcessor.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopProcessor.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopProcessor.java
new file mode 100644
index 0000000..b9c20c3
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopProcessor.java
@@ -0,0 +1,223 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.configuration.HadoopConfiguration;
+import org.apache.ignite.hadoop.mapreduce.IgniteHadoopMapReducePlanner;
+import org.apache.ignite.internal.GridKernalContext;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
+import org.apache.ignite.internal.processors.hadoop.jobtracker.HadoopJobTracker;
+import org.apache.ignite.internal.processors.hadoop.shuffle.HadoopShuffle;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopEmbeddedTaskExecutor;
+import org.apache.ignite.internal.util.tostring.GridToStringExclude;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Hadoop processor.
+ */
+public class HadoopProcessor extends HadoopProcessorAdapter {
+    /** Job ID counter. */
+    private final AtomicInteger idCtr = new AtomicInteger();
+
+    /** Hadoop context. */
+    @GridToStringExclude
+    private HadoopContext hctx;
+
+    /** Hadoop facade for public API. */
+    @GridToStringExclude
+    private Hadoop hadoop;
+
+    /**
+     * Constructor.
+     *
+     * @param ctx Kernal context.
+     */
+    public HadoopProcessor(GridKernalContext ctx) {
+        super(ctx);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void start() throws IgniteCheckedException {
+        if (ctx.isDaemon())
+            return;
+
+        HadoopConfiguration cfg = ctx.config().getHadoopConfiguration();
+
+        if (cfg == null)
+            cfg = new HadoopConfiguration();
+        else
+            cfg = new HadoopConfiguration(cfg);
+
+        initializeDefaults(cfg);
+
+        hctx = new HadoopContext(
+            ctx,
+            cfg,
+            new HadoopJobTracker(),
+            new HadoopEmbeddedTaskExecutor(),
+            // TODO: IGNITE-404: Uncomment when fixed.
+            //cfg.isExternalExecution() ? new HadoopExternalTaskExecutor() : new HadoopEmbeddedTaskExecutor(),
+            new HadoopShuffle());
+
+        for (HadoopComponent c : hctx.components())
+            c.start(hctx);
+
+        hadoop = new HadoopImpl(this);
+
+        ctx.addNodeAttribute(HadoopAttributes.NAME, new HadoopAttributes(cfg));
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onKernalStart() throws IgniteCheckedException {
+        super.onKernalStart();
+
+        if (hctx == null)
+            return;
+
+        for (HadoopComponent c : hctx.components())
+            c.onKernalStart();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onKernalStop(boolean cancel) {
+        super.onKernalStop(cancel);
+
+        if (hctx == null)
+            return;
+
+        List<HadoopComponent> components = hctx.components();
+
+        for (ListIterator<HadoopComponent> it = components.listIterator(components.size()); it.hasPrevious();) {
+            HadoopComponent c = it.previous();
+
+            c.onKernalStop(cancel);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void stop(boolean cancel) throws IgniteCheckedException {
+        super.stop(cancel);
+
+        if (hctx == null)
+            return;
+
+        List<HadoopComponent> components = hctx.components();
+
+        for (ListIterator<HadoopComponent> it = components.listIterator(components.size()); it.hasPrevious();) {
+            HadoopComponent c = it.previous();
+
+            c.stop(cancel);
+        }
+    }
+
+    /**
+     * Gets Hadoop context.
+     *
+     * @return Hadoop context.
+     */
+    public HadoopContext context() {
+        return hctx;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Hadoop hadoop() {
+        if (hadoop == null)
+            throw new IllegalStateException("Hadoop accelerator is disabled (Hadoop is not in classpath, " +
+                "is HADOOP_HOME environment variable set?)");
+
+        return hadoop;
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopConfiguration config() {
+        return hctx.configuration();
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopJobId nextJobId() {
+        return new HadoopJobId(ctx.localNodeId(), idCtr.incrementAndGet());
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteInternalFuture<?> submit(HadoopJobId jobId, HadoopJobInfo jobInfo) {
+        return hctx.jobTracker().submit(jobId, jobInfo);
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopJobStatus status(HadoopJobId jobId) throws IgniteCheckedException {
+        return hctx.jobTracker().status(jobId);
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopCounters counters(HadoopJobId jobId) throws IgniteCheckedException {
+        return hctx.jobTracker().jobCounters(jobId);
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteInternalFuture<?> finishFuture(HadoopJobId jobId) throws IgniteCheckedException {
+        return hctx.jobTracker().finishFuture(jobId);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean kill(HadoopJobId jobId) throws IgniteCheckedException {
+        return hctx.jobTracker().killJob(jobId);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void validateEnvironment() throws IgniteCheckedException {
+        // Perform some static checks as early as possible, so that any recoverable exceptions are thrown here.
+        try {
+            HadoopLocations loc = HadoopClasspathUtils.locations();
+
+            if (!F.isEmpty(loc.home()))
+                U.quietAndInfo(log, HadoopClasspathUtils.HOME + " is set to " + loc.home());
+
+            U.quietAndInfo(log, "Resolved Hadoop classpath locations: " + loc.common() + ", " + loc.hdfs() + ", " +
+                loc.mapred());
+        }
+        catch (IOException ioe) {
+            throw new IgniteCheckedException(ioe.getMessage(), ioe);
+        }
+
+        HadoopClassLoader.hadoopUrls();
+    }
+
+    /**
+     * Initializes default Hadoop configuration.
+     *
+     * @param cfg Hadoop configuration.
+     */
+    private void initializeDefaults(HadoopConfiguration cfg) {
+        if (cfg.getMapReducePlanner() == null)
+            cfg.setMapReducePlanner(new IgniteHadoopMapReducePlanner());
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopProcessor.class, this);
+    }
+}
\ No newline at end of file
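Note how onKernalStop() and stop() above walk the component list backwards with a ListIterator so components shut down in reverse start order. A minimal sketch of that pattern in isolation (the Component interface is an illustrative stand-in, not the Ignite API):

import java.util.Arrays;
import java.util.List;
import java.util.ListIterator;

public class ReverseStop {
    /** Illustrative stand-in for HadoopComponent. */
    interface Component {
        void stop();
    }

    /** Stops components in reverse list (i.e. reverse start) order. */
    static void stopAll(List<Component> components) {
        for (ListIterator<Component> it = components.listIterator(components.size()); it.hasPrevious();)
            it.previous().stop();
    }

    public static void main(String[] args) {
        stopAll(Arrays.<Component>asList(
            () -> System.out.println("job tracker stopped"),
            () -> System.out.println("task executor stopped"),
            () -> System.out.println("shuffle stopped")));
        // Prints: shuffle stopped, task executor stopped, job tracker stopped.
    }
}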

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopSetup.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopSetup.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopSetup.java
new file mode 100644
index 0000000..ed39ce5
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopSetup.java
@@ -0,0 +1,542 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileWriter;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.URL;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Date;
+import java.util.Scanner;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+import static org.apache.ignite.internal.IgniteVersionUtils.ACK_VER_STR;
+import static org.apache.ignite.internal.IgniteVersionUtils.COPYRIGHT;
+
+/**
+ * Setup tool to configure Hadoop client.
+ */
+public class HadoopSetup {
+    /** */
+    public static final String WINUTILS_EXE = "winutils.exe";
+
+    /** */
+    private static final FilenameFilter IGNITE_JARS = new FilenameFilter() {
+        @Override public boolean accept(File dir, String name) {
+            return name.startsWith("ignite-") && name.endsWith(".jar");
+        }
+    };
+
+    /**
+     * The main method.
+     * @param ignore Params.
+     */
+    public static void main(String[] ignore) {
+        X.println(
+            "   __________  ________________ ",
+            "  /  _/ ___/ |/ /  _/_  __/ __/ ",
+            " _/ // (7 7    // /  / / / _/   ",
+            "/___/\\___/_/|_/___/ /_/ /___/  ",
+            "                for Apache Hadoop        ",
+            " ",
+            "ver. " + ACK_VER_STR,
+            COPYRIGHT);
+
+        configureHadoop();
+    }
+
+    /**
+     * This operation prepares a clean, unpacked Hadoop distribution to work as a client with Ignite-Hadoop.
+     * It performs these operations:
+     * <ul>
+     *     <li>Checks that the HADOOP_HOME environment variable is set.</li>
+     *     <li>Tries to resolve HADOOP_COMMON_HOME or evaluates it relative to HADOOP_HOME.</li>
+     *     <li>On Windows, checks that winutils.exe exists and tries to work around known restrictions.</li>
+     *     <li>On Windows, checks CMD scripts for new line character issues.</li>
+     *     <li>Scans the Hadoop lib directory for Ignite JARs and, if they are missing, tries to create symbolic links to them.</li>
+     * </ul>
+     */
+    private static void configureHadoop() {
+        String igniteHome = U.getIgniteHome();
+
+        println("IGNITE_HOME is set to '" + igniteHome + "'.");
+
+        checkIgniteHome(igniteHome);
+
+        String homeVar = "HADOOP_HOME";
+        String hadoopHome = System.getenv(homeVar);
+
+        if (F.isEmpty(hadoopHome)) {
+            homeVar = "HADOOP_PREFIX";
+            hadoopHome = System.getenv(homeVar);
+        }
+
+        if (F.isEmpty(hadoopHome))
+            exit("Neither HADOOP_HOME nor HADOOP_PREFIX environment variable is set. Please set one of them to a " +
+                "valid Hadoop installation directory and run setup tool again.", null);
+
+        hadoopHome = hadoopHome.replaceAll("\"", "");
+
+        println(homeVar + " is set to '" + hadoopHome + "'.");
+
+        String hiveHome = System.getenv("HIVE_HOME");
+
+        if (!F.isEmpty(hiveHome)) {
+            hiveHome = hiveHome.replaceAll("\"", "");
+
+            println("HIVE_HOME is set to '" + hiveHome + "'.");
+        }
+
+        File hadoopDir = new File(hadoopHome);
+
+        if (!hadoopDir.exists())
+            exit("Hadoop installation folder does not exist.", null);
+
+        if (!hadoopDir.isDirectory())
+            exit("HADOOP_HOME must point to a directory.", null);
+
+        if (!hadoopDir.canRead())
+            exit("Hadoop installation folder can not be read. Please check permissions.", null);
+
+        final File hadoopCommonDir;
+
+        String hadoopCommonHome = System.getenv("HADOOP_COMMON_HOME");
+
+        if (F.isEmpty(hadoopCommonHome)) {
+            hadoopCommonDir = new File(hadoopDir, "share/hadoop/common");
+
+            println("HADOOP_COMMON_HOME is not set, will use '" + hadoopCommonDir.getPath() + "'.");
+        }
+        else {
+            println("HADOOP_COMMON_HOME is set to '" + hadoopCommonHome + "'.");
+
+            hadoopCommonDir = new File(hadoopCommonHome);
+        }
+
+        if (!hadoopCommonDir.canRead())
+            exit("Failed to read Hadoop common dir '" + hadoopCommonDir + "'.", null);
+
+        final File hadoopCommonLibDir = new File(hadoopCommonDir, "lib");
+
+        if (!hadoopCommonLibDir.canRead())
+            exit("Failed to read Hadoop 'lib' folder in '" + hadoopCommonLibDir.getPath() + "'.", null);
+
+        if (U.isWindows()) {
+            checkJavaPathSpaces();
+
+            final File hadoopBinDir = new File(hadoopDir, "bin");
+
+            if (!hadoopBinDir.canRead())
+                exit("Failed to read subdirectory 'bin' in HADOOP_HOME.", null);
+
+            File winutilsFile = new File(hadoopBinDir, WINUTILS_EXE);
+
+            if (!winutilsFile.exists()) {
+                if (ask("File '" + WINUTILS_EXE + "' does not exist. " +
+                    "It may be replaced by a stub. Create it?")) {
+                    println("Creating file stub '" + winutilsFile.getAbsolutePath() + "'.");
+
+                    boolean ok = false;
+
+                    try {
+                        ok = winutilsFile.createNewFile();
+                    }
+                    catch (IOException ignore) {
+                        // No-op.
+                    }
+
+                    if (!ok)
+                        exit("Failed to create '" + WINUTILS_EXE + "' file. Please check permissions.", null);
+                }
+                else
+                    println("Ok. But Hadoop client probably will not work on Windows this way...");
+            }
+
+            processCmdFiles(hadoopDir, "bin", "sbin", "libexec");
+        }
+
+        File igniteLibs = new File(new File(igniteHome), "libs");
+
+        if (!igniteLibs.exists())
+            exit("Ignite 'libs' folder is not found.", null);
+
+        Collection<File> jarFiles = new ArrayList<>();
+
+        addJarsInFolder(jarFiles, igniteLibs);
+        addJarsInFolder(jarFiles, new File(igniteLibs, "ignite-hadoop"));
+        addJarsInFolder(jarFiles, new File(igniteLibs, "ignite-hadoop-impl"));
+
+        boolean jarsLinksCorrect = true;
+
+        for (File file : jarFiles) {
+            File link = new File(hadoopCommonLibDir, file.getName());
+
+            jarsLinksCorrect &= isJarLinkCorrect(link, file);
+
+            if (!jarsLinksCorrect)
+                break;
+        }
+
+        if (!jarsLinksCorrect) {
+            if (ask("Ignite JAR files are not found in Hadoop 'lib' directory. " +
+                "Create appropriate symbolic links?")) {
+                File[] oldIgniteJarFiles = hadoopCommonLibDir.listFiles(IGNITE_JARS);
+
+                if (oldIgniteJarFiles.length > 0 && ask("The Hadoop 'lib' directory contains JARs from other Ignite " +
+                    "installation. They must be deleted to continue. Continue?")) {
+                    for (File file : oldIgniteJarFiles) {
+                        println("Deleting file '" + file.getAbsolutePath() + "'.");
+
+                        if (!file.delete())
+                            exit("Failed to delete file '" + file.getPath() + "'.", null);
+                    }
+                }
+
+                for (File file : jarFiles) {
+                    File targetFile = new File(hadoopCommonLibDir, file.getName());
+
+                    try {
+                        println("Creating symbolic link '" + targetFile.getAbsolutePath() + "'.");
+
+                        Files.createSymbolicLink(targetFile.toPath(), file.toPath());
+                    }
+                    catch (IOException e) {
+                        if (U.isWindows()) {
+                            warn("Ability to create symbolic links is required!");
+                            warn("On Windows platform you have to grant permission 'Create symbolic links'");
+                            warn("to your user or run the Accelerator as Administrator.");
+                        }
+
+                        exit("Creating symbolic link failed! Check permissions.", e);
+                    }
+                }
+            }
+            else
+                println("Ok. But Hadoop client will not be able to talk to Ignite cluster without those JARs in classpath...");
+        }
+
+        File hadoopEtc = new File(hadoopDir, "etc" + File.separator + "hadoop");
+
+        File igniteHadoopCfg = igniteHadoopConfig(igniteHome);
+
+        if (!igniteHadoopCfg.canRead())
+            exit("Failed to read Ignite Hadoop 'config' folder at '" + igniteHadoopCfg.getAbsolutePath() + "'.", null);
+
+        if (hadoopEtc.canWrite()) { // TODO Bigtop
+            if (ask("Replace 'core-site.xml' and 'mapred-site.xml' files with preconfigured templates " +
+                "(existing files will be backed up)?")) {
+                replaceWithBackup(new File(igniteHadoopCfg, "core-site.ignite.xml"),
+                    new File(hadoopEtc, "core-site.xml"));
+
+                replaceWithBackup(new File(igniteHadoopCfg, "mapred-site.ignite.xml"),
+                    new File(hadoopEtc, "mapred-site.xml"));
+            }
+            else
+                println("Ok. You can configure them later, the templates are available at Ignite's 'docs' directory...");
+        }
+
+        if (!F.isEmpty(hiveHome)) {
+            File hiveConfDir = new File(hiveHome + File.separator + "conf");
+
+            if (!hiveConfDir.canWrite())
+                warn("Can not write to '" + hiveConfDir.getAbsolutePath() + "'. To run Hive queries you have to " +
+                    "configure 'hive-site.xml' manually. The template is available at Ignite's 'docs' directory.");
+            else if (ask("Replace 'hive-site.xml' with preconfigured template (existing file will be backed up)?"))
+                replaceWithBackup(new File(igniteHadoopCfg, "hive-site.ignite.xml"),
+                    new File(hiveConfDir, "hive-site.xml"));
+            else
+                println("Ok. You can configure it later, the template is available at Ignite's 'docs' directory...");
+        }
+
+        println("Apache Hadoop setup is complete.");
+    }
+
+    /**
+     * Get Ignite Hadoop config directory.
+     *
+     * @param igniteHome Ignite home.
+     * @return Ignite Hadoop config directory.
+     */
+    private static File igniteHadoopConfig(String igniteHome) {
+        Path path = Paths.get(igniteHome, "modules", "hadoop", "config");
+
+        if (!Files.exists(path))
+            path = Paths.get(igniteHome, "config", "hadoop");
+
+        if (Files.exists(path))
+            return path.toFile();
+        else
+            return new File(igniteHome, "docs");
+    }
+
+    /**
+     * @param jarFiles Jars.
+     * @param folder Folder.
+     */
+    private static void addJarsInFolder(Collection<File> jarFiles, File folder) {
+        if (!folder.exists())
+            exit("Folder '" + folder.getAbsolutePath() + "' is not found.", null);
+
+        jarFiles.addAll(Arrays.asList(folder.listFiles(IGNITE_JARS)));
+    }
+
+    /**
+     * Checks that JAVA_HOME does not contain space characters.
+     */
+    private static void checkJavaPathSpaces() {
+        String javaHome = System.getProperty("java.home");
+
+        if (javaHome.contains(" ")) {
+            warn("Java installation path contains space characters!");
+            warn("Hadoop client will not be able to start using '" + javaHome + "'.");
+            warn("Please install JRE to path which does not contain spaces and point JAVA_HOME to that installation.");
+        }
+    }
+
+    /**
+     * Checks Ignite home.
+     *
+     * @param igniteHome Ignite home.
+     */
+    private static void checkIgniteHome(String igniteHome) {
+        URL jarUrl = U.class.getProtectionDomain().getCodeSource().getLocation();
+
+        try {
+            Path jar = Paths.get(jarUrl.toURI());
+            Path igHome = Paths.get(igniteHome);
+
+            if (!jar.startsWith(igHome))
+                exit("Ignite JAR files are not under IGNITE_HOME.", null);
+        }
+        catch (Exception e) {
+            exit(e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Replaces target file with source file.
+     *
+     * @param from From.
+     * @param to To.
+     */
+    private static void replaceWithBackup(File from, File to) {
+        if (!from.canRead())
+            exit("Failed to read source file '" + from.getAbsolutePath() + "'.", null);
+
+        println("Replacing file '" + to.getAbsolutePath() + "'.");
+
+        try {
+            U.copy(from, renameToBak(to), true);
+        }
+        catch (IOException e) {
+            exit("Failed to replace file '" + to.getAbsolutePath() + "'.", e);
+        }
+    }
+
+    /**
+     * Renames file for backup.
+     *
+     * @param file File.
+     * @return File.
+     */
+    private static File renameToBak(File file) {
+        DateFormat fmt = new SimpleDateFormat("yyyy-MM-dd_HH-mm-ss");
+
+        if (file.exists() && !file.renameTo(new File(file.getAbsolutePath() + "." + fmt.format(new Date()) + ".bak")))
+            exit("Failed to rename file '" + file.getPath() + "'.", null);
+
+        return file;
+    }
+
+    /**
+     * Checks if link is correct.
+     *
+     * @param link Symbolic link.
+     * @param correctTarget Correct link target.
+     * @return {@code true} If link target is correct.
+     */
+    private static boolean isJarLinkCorrect(File link, File correctTarget) {
+        if (!Files.isSymbolicLink(link.toPath()))
+            return false; // It is a real file or it does not exist.
+
+        Path target = null;
+
+        try {
+            target = Files.readSymbolicLink(link.toPath());
+        }
+        catch (IOException e) {
+            exit("Failed to read symbolic link: " + link.getAbsolutePath(), e);
+        }
+
+        return Files.exists(target) && target.toFile().equals(correctTarget);
+    }
+
+    /**
+     * Writes the question and reads the boolean answer from the console.
+     *
+     * @param question Question to write.
+     * @return {@code true} if user inputs 'Y' or 'y', {@code false} otherwise.
+     */
+    private static boolean ask(String question) {
+        X.println();
+        X.print(" <  " + question + " (Y/N): ");
+
+        String answer = null;
+
+        if (!F.isEmpty(System.getenv("IGNITE_HADOOP_SETUP_YES")))
+            answer = "Y";
+        else {
+            BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
+
+            try {
+                answer = br.readLine();
+            }
+            catch (IOException e) {
+                exit("Failed to read answer: " + e.getMessage(), e);
+            }
+        }
+
+        if (answer != null && "Y".equals(answer.toUpperCase().trim())) {
+            X.println(" >  Yes.");
+
+            return true;
+        }
+        else {
+            X.println(" >  No.");
+
+            return false;
+        }
+    }
+
+    /**
+     * Exits with a message.
+     *
+     * @param msg Exit message.
+     * @param e Optional exception to print when IGNITE_HADOOP_SETUP_DEBUG is set.
+     */
+    private static void exit(String msg, Exception e) {
+        X.println("    ");
+        X.println("  # " + msg);
+        X.println("  # Setup failed, exiting... ");
+
+        if (e != null && !F.isEmpty(System.getenv("IGNITE_HADOOP_SETUP_DEBUG")))
+            e.printStackTrace();
+
+        System.exit(1);
+    }
+
+    /**
+     * Prints message.
+     *
+     * @param msg Message.
+     */
+    private static void println(String msg) {
+        X.println("  > " + msg);
+    }
+
+    /**
+     * Prints warning.
+     *
+     * @param msg Message.
+     */
+    private static void warn(String msg) {
+        X.println("  ! " + msg);
+    }
+
+    /**
+     * Checks that CMD files have valid MS Windows new line characters. If not, writes a question to the console and
+     * reads the answer. If it is 'Y', backs up the original files and corrects the invalid new line characters.
+     *
+     * @param rootDir Root directory to process.
+     * @param dirs Directories inside of the root to process.
+     */
+    private static void processCmdFiles(File rootDir, String... dirs) {
+        boolean answer = false;
+
+        for (String dir : dirs) {
+            File subDir = new File(rootDir, dir);
+
+            File[] cmdFiles = subDir.listFiles(new FilenameFilter() {
+                @Override public boolean accept(File dir, String name) {
+                    return name.toLowerCase().endsWith(".cmd");
+                }
+            });
+
+            for (File file : cmdFiles) {
+                String content = null;
+
+                try (Scanner scanner = new Scanner(file)) {
+                    content = scanner.useDelimiter("\\Z").next();
+                }
+                catch (FileNotFoundException e) {
+                    exit("Failed to read file '" + file + "'.", e);
+                }
+
+                boolean invalid = false;
+
+                for (int i = 0; i < content.length(); i++) {
+                    if (content.charAt(i) == '\n' && (i == 0 || content.charAt(i - 1) != '\r')) {
+                        invalid = true;
+
+                        break;
+                    }
+                }
+
+                if (invalid) {
+                    answer = answer || ask("One or more *.CMD files have invalid new line characters. Fix them?");
+
+                    if (!answer) {
+                        println("Ok. But Windows most probably will fail to execute them...");
+
+                        return;
+                    }
+
+                    println("Fixing newline characters in file '" + file.getAbsolutePath() + "'.");
+
+                    renameToBak(file);
+
+                    try (BufferedWriter writer = new BufferedWriter(new FileWriter(file))) {
+                        for (int i = 0; i < content.length(); i++) {
+                            if (content.charAt(i) == '\n' && (i == 0 || content.charAt(i - 1) != '\r'))
+                                writer.write("\r");
+
+                            writer.write(content.charAt(i));
+                        }
+                    }
+                    catch (IOException e) {
+                        exit("Failed to write file '" + file.getPath() + "': " + e.getMessage(), e);
+                    }
+                }
+            }
+        }
+    }
+}
\ No newline at end of file
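The newline check in processCmdFiles() treats a '\n' not preceded by '\r' as invalid and rewrites the file with CRLF endings. The core transformation, extracted into a self-contained sketch (CrlfFixer is an illustrative name, not part of the tool):

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

public class CrlfFixer {
    /** Returns the content with every bare '\n' expanded to "\r\n". */
    static String toCrlf(String content) {
        StringBuilder sb = new StringBuilder(content.length());

        for (int i = 0; i < content.length(); i++) {
            // Insert the missing '\r' before any '\n' that does not already have one.
            if (content.charAt(i) == '\n' && (i == 0 || content.charAt(i - 1) != '\r'))
                sb.append('\r');

            sb.append(content.charAt(i));
        }

        return sb.toString();
    }

    public static void main(String[] args) throws Exception {
        byte[] raw = Files.readAllBytes(Paths.get(args[0])); // Path to a *.cmd file.

        Files.write(Paths.get(args[0]), toCrlf(new String(raw, StandardCharsets.UTF_8)).getBytes(StandardCharsets.UTF_8));
    }
}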

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskCancelledException.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskCancelledException.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskCancelledException.java
new file mode 100644
index 0000000..1dc8674
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskCancelledException.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import org.apache.ignite.IgniteException;
+
+/**
+ * Exception thrown when a task is cancelled.
+ */
+public class HadoopTaskCancelledException extends IgniteException {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /**
+     * @param msg Exception message.
+     */
+    public HadoopTaskCancelledException(String msg) {
+        super(msg);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopUtils.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopUtils.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopUtils.java
new file mode 100644
index 0000000..83ccdf0
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopUtils.java
@@ -0,0 +1,368 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutput;
+import java.io.ObjectOutputStream;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeSet;
+import java.util.UUID;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.JobPriority;
+import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.v2.HadoopSplitWrapper;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Hadoop utility methods.
+ */
+public class HadoopUtils {
+    /** Property to store timestamp of new job id request. */
+    public static final String REQ_NEW_JOBID_TS_PROPERTY = "ignite.job.requestNewIdTs";
+
+    /** Property to store timestamp of response of new job id request. */
+    public static final String RESPONSE_NEW_JOBID_TS_PROPERTY = "ignite.job.responseNewIdTs";
+
+    /** Property to store timestamp of job submission. */
+    public static final String JOB_SUBMISSION_START_TS_PROPERTY = "ignite.job.submissionStartTs";
+
+    /** Property to set custom writer of job statistics. */
+    public static final String JOB_COUNTER_WRITER_PROPERTY = "ignite.counters.writer";
+
+    /** Staging constant. */
+    private static final String STAGING_CONSTANT = ".staging";
+
+    /** Old mapper class attribute. */
+    private static final String OLD_MAP_CLASS_ATTR = "mapred.mapper.class";
+
+    /** Old reducer class attribute. */
+    private static final String OLD_REDUCE_CLASS_ATTR = "mapred.reducer.class";
+
+    /**
+     * Constructor.
+     */
+    private HadoopUtils() {
+        // No-op.
+    }
+
+    /**
+     * Wraps native split.
+     *
+     * @param id Split ID.
+     * @param split Split.
+     * @param hosts Hosts.
+     * @return Wrapped split.
+     * @throws IOException If failed.
+     */
+    public static HadoopSplitWrapper wrapSplit(int id, Object split, String[] hosts) throws IOException {
+        ByteArrayOutputStream arr = new ByteArrayOutputStream();
+        ObjectOutput out = new ObjectOutputStream(arr);
+
+        assert split instanceof Writable;
+
+        ((Writable)split).write(out);
+
+        out.flush();
+
+        return new HadoopSplitWrapper(id, split.getClass().getName(), arr.toByteArray(), hosts);
+    }
+
+    /**
+     * Unwraps native split.
+     *
+     * @param o Wrapper.
+     * @return Split.
+     */
+    public static Object unwrapSplit(HadoopSplitWrapper o) {
+        try {
+            Writable w = (Writable)HadoopUtils.class.getClassLoader().loadClass(o.className()).newInstance();
+
+            w.readFields(new ObjectInputStream(new ByteArrayInputStream(o.bytes())));
+
+            return w;
+        }
+        catch (Exception e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+    /**
+     * Converts Ignite job status to Hadoop job status.
+     *
+     * @param status Ignite job status.
+     * @param conf Job configuration.
+     * @return Hadoop job status.
+     */
+    public static JobStatus status(HadoopJobStatus status, Configuration conf) {
+        JobID jobId = new JobID(status.jobId().globalId().toString(), status.jobId().localId());
+
+        float setupProgress = 0;
+        float mapProgress = 0;
+        float reduceProgress = 0;
+        float cleanupProgress = 0;
+
+        JobStatus.State state = JobStatus.State.RUNNING;
+
+        switch (status.jobPhase()) {
+            case PHASE_SETUP:
+                setupProgress = 0.42f;
+
+                break;
+
+            case PHASE_MAP:
+                setupProgress = 1;
+                mapProgress = 1f - status.pendingMapperCnt() / (float)status.totalMapperCnt();
+
+                break;
+
+            case PHASE_REDUCE:
+                setupProgress = 1;
+                mapProgress = 1;
+
+                if (status.totalReducerCnt() > 0)
+                    reduceProgress = 1f - status.pendingReducerCnt() / (float)status.totalReducerCnt();
+                else
+                    reduceProgress = 1f;
+
+                break;
+
+            case PHASE_CANCELLING:
+            case PHASE_COMPLETE:
+                if (!status.isFailed()) {
+                    setupProgress = 1;
+                    mapProgress = 1;
+                    reduceProgress = 1;
+                    cleanupProgress = 1;
+
+                    state = JobStatus.State.SUCCEEDED;
+                }
+                else
+                    state = JobStatus.State.FAILED;
+
+                break;
+
+            default:
+                assert false;
+        }
+
+        return new JobStatus(jobId, setupProgress, mapProgress, reduceProgress, cleanupProgress, state,
+            JobPriority.NORMAL, status.user(), status.jobName(), jobFile(conf, status.user(), jobId).toString(), "N/A");
+    }
+
+    /**
+     * Gets staging area directory.
+     *
+     * @param conf Configuration.
+     * @param usr User.
+     * @return Staging area directory.
+     */
+    public static Path stagingAreaDir(Configuration conf, String usr) {
+        return new Path(conf.get(MRJobConfig.MR_AM_STAGING_DIR, MRJobConfig.DEFAULT_MR_AM_STAGING_DIR)
+            + Path.SEPARATOR + usr + Path.SEPARATOR + STAGING_CONSTANT);
+    }
+
+    /**
+     * Gets job file.
+     *
+     * @param conf Configuration.
+     * @param usr User.
+     * @param jobId Job ID.
+     * @return Job file.
+     */
+    public static Path jobFile(Configuration conf, String usr, JobID jobId) {
+        return new Path(stagingAreaDir(conf, usr), jobId.toString() + Path.SEPARATOR + MRJobConfig.JOB_CONF_FILE);
+    }
+
+    /**
+     * Checks that the given attribute is not set in the configuration.
+     *
+     * @param cfg Configuration to check.
+     * @param attr Attribute name.
+     * @param msg Message for the exception.
+     * @throws IgniteCheckedException If the attribute is set.
+     */
+    public static void ensureNotSet(Configuration cfg, String attr, String msg) throws IgniteCheckedException {
+        if (cfg.get(attr) != null)
+            throw new IgniteCheckedException(attr + " is incompatible with " + msg + " mode.");
+    }
+
+    /**
+     * Creates JobInfo from hadoop configuration.
+     *
+     * @param cfg Hadoop configuration.
+     * @return Job info.
+     * @throws IgniteCheckedException If failed.
+     */
+    public static HadoopDefaultJobInfo createJobInfo(Configuration cfg) throws IgniteCheckedException {
+        JobConf jobConf = new JobConf(cfg);
+
+        boolean hasCombiner = jobConf.get("mapred.combiner.class") != null
+                || jobConf.get(MRJobConfig.COMBINE_CLASS_ATTR) != null;
+
+        int numReduces = jobConf.getNumReduceTasks();
+
+        jobConf.setBooleanIfUnset("mapred.mapper.new-api", jobConf.get(OLD_MAP_CLASS_ATTR) == null);
+
+        if (jobConf.getUseNewMapper()) {
+            String mode = "new map API";
+
+            ensureNotSet(jobConf, "mapred.input.format.class", mode);
+            ensureNotSet(jobConf, OLD_MAP_CLASS_ATTR, mode);
+
+            if (numReduces != 0)
+                ensureNotSet(jobConf, "mapred.partitioner.class", mode);
+            else
+                ensureNotSet(jobConf, "mapred.output.format.class", mode);
+        }
+        else {
+            String mode = "map compatibility";
+
+            ensureNotSet(jobConf, MRJobConfig.INPUT_FORMAT_CLASS_ATTR, mode);
+            ensureNotSet(jobConf, MRJobConfig.MAP_CLASS_ATTR, mode);
+
+            if (numReduces != 0)
+                ensureNotSet(jobConf, MRJobConfig.PARTITIONER_CLASS_ATTR, mode);
+            else
+                ensureNotSet(jobConf, MRJobConfig.OUTPUT_FORMAT_CLASS_ATTR, mode);
+        }
+
+        if (numReduces != 0) {
+            jobConf.setBooleanIfUnset("mapred.reducer.new-api", jobConf.get(OLD_REDUCE_CLASS_ATTR) == null);
+
+            if (jobConf.getUseNewReducer()) {
+                String mode = "new reduce API";
+
+                ensureNotSet(jobConf, "mapred.output.format.class", mode);
+                ensureNotSet(jobConf, OLD_REDUCE_CLASS_ATTR, mode);
+            }
+            else {
+                String mode = "reduce compatibility";
+
+                ensureNotSet(jobConf, MRJobConfig.OUTPUT_FORMAT_CLASS_ATTR, mode);
+                ensureNotSet(jobConf, MRJobConfig.REDUCE_CLASS_ATTR, mode);
+            }
+        }
+
+        Map<String, String> props = new HashMap<>();
+
+        for (Map.Entry<String, String> entry : jobConf)
+            props.put(entry.getKey(), entry.getValue());
+
+        return new HadoopDefaultJobInfo(jobConf.getJobName(), jobConf.getUser(), hasCombiner, numReduces, props);
+    }
+
+    /**
+     * Creates a new {@link IgniteCheckedException} with the original exception serialized into a string.
+     * This is needed to transfer the error outside the current class loader.
+     *
+     * @param e Original exception.
+     * @return New exception.
+     */
+    public static IgniteCheckedException transformException(Throwable e) {
+        ByteArrayOutputStream os = new ByteArrayOutputStream();
+
+        e.printStackTrace(new PrintStream(os, true));
+
+        return new IgniteCheckedException(os.toString());
+    }
+
+    /**
+     * Returns work directory for job execution.
+     *
+     * @param locNodeId Local node ID.
+     * @param jobId Job ID.
+     * @return Working directory for job.
+     * @throws IgniteCheckedException If failed.
+     */
+    public static File jobLocalDir(UUID locNodeId, HadoopJobId jobId) throws IgniteCheckedException {
+        return new File(new File(U.resolveWorkDirectory("hadoop", false), "node-" + locNodeId), "job_" + jobId);
+    }
+
+    /**
+     * Returns subdirectory of job working directory for task execution.
+     *
+     * @param locNodeId Local node ID.
+     * @param info Task info.
+     * @return Working directory for task.
+     * @throws IgniteCheckedException If failed.
+     */
+    public static File taskLocalDir(UUID locNodeId, HadoopTaskInfo info) throws IgniteCheckedException {
+        File jobLocDir = jobLocalDir(locNodeId, info.jobId());
+
+        return new File(jobLocDir, info.type() + "_" + info.taskNumber() + "_" + info.attempt());
+    }
+
+    /**
+     * Creates a {@link Configuration} in the correct class loader context to avoid caching
+     * an inappropriate class loader in the Configuration object.
+     *
+     * @return New instance of {@link Configuration}.
+     */
+    public static Configuration safeCreateConfiguration() {
+        final ClassLoader oldLdr = setContextClassLoader(Configuration.class.getClassLoader());
+
+        try {
+            return new Configuration();
+        }
+        finally {
+            restoreContextClassLoader(oldLdr);
+        }
+    }
+
+    /**
+     * Set context class loader.
+     *
+     * @param newLdr New class loader.
+     * @return Old class loader.
+     */
+    @Nullable public static ClassLoader setContextClassLoader(@Nullable ClassLoader newLdr) {
+        ClassLoader oldLdr = Thread.currentThread().getContextClassLoader();
+
+        if (newLdr != oldLdr)
+            Thread.currentThread().setContextClassLoader(newLdr);
+
+        return oldLdr;
+    }
+
+    /**
+     * Restore context class loader.
+     *
+     * @param oldLdr Original class loader.
+     */
+    public static void restoreContextClassLoader(@Nullable ClassLoader oldLdr) {
+        ClassLoader newLdr = Thread.currentThread().getContextClassLoader();
+
+        if (newLdr != oldLdr)
+            Thread.currentThread().setContextClassLoader(oldLdr);
+    }
+}
\ No newline at end of file
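
For reference, the setContextClassLoader / restoreContextClassLoader pair above is meant to be
used in a try/finally pattern like the following (a minimal sketch, not part of the patch;
doWork() stands in for any code that must run under the Hadoop class loader):

    ClassLoader oldLdr = HadoopUtils.setContextClassLoader(Configuration.class.getClassLoader());

    try {
        doWork(); // Runs with the Configuration class loader as the context class loader.
    }
    finally {
        HadoopUtils.restoreContextClassLoader(oldLdr); // Restore the original loader even on failure.
    }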

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCounterAdapter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCounterAdapter.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCounterAdapter.java
new file mode 100644
index 0000000..3f682d3
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCounterAdapter.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.counter;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Default Hadoop counter implementation.
+ */
+public abstract class HadoopCounterAdapter implements HadoopCounter, Externalizable {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** Counter group name. */
+    private String grp;
+
+    /** Counter name. */
+    private String name;
+
+    /**
+     * Default constructor required by {@link Externalizable}.
+     */
+    protected HadoopCounterAdapter() {
+        // No-op.
+    }
+
+    /**
+     * Creates new counter with given group and name.
+     *
+     * @param grp Counter group name.
+     * @param name Counter name.
+     */
+    protected HadoopCounterAdapter(String grp, String name) {
+        assert grp != null : "counter must have group";
+        assert name != null : "counter must have name";
+
+        this.grp = grp;
+        this.name = name;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String name() {
+        return name;
+    }
+
+    /** {@inheritDoc} */
+    @Override @Nullable public String group() {
+        return grp;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeExternal(ObjectOutput out) throws IOException {
+        out.writeUTF(grp);
+        out.writeUTF(name);
+        writeValue(out);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        grp = in.readUTF();
+        name = in.readUTF();
+        readValue(in);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object o) {
+        if (this == o)
+            return true;
+        if (o == null || getClass() != o.getClass())
+            return false;
+
+        HadoopCounterAdapter cntr = (HadoopCounterAdapter)o;
+
+        if (!grp.equals(cntr.grp))
+            return false;
+        if (!name.equals(cntr.name))
+            return false;
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        int res = grp.hashCode();
+        res = 31 * res + name.hashCode();
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopCounterAdapter.class, this);
+    }
+
+    /**
+     * Writes value of this counter to output.
+     *
+     * @param out Output.
+     * @throws IOException If failed.
+     */
+    protected abstract void writeValue(ObjectOutput out) throws IOException;
+
+    /**
+     * Reads value of this counter from input.
+     *
+     * @param in Input.
+     * @throws IOException If failed.
+     */
+    protected abstract void readValue(ObjectInput in) throws IOException;
+}
\ No newline at end of file
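
For reference, the Externalizable contract above (the adapter writes the group and name, then
delegates the value to writeValue()/readValue()) round-trips through plain Java serialization.
A minimal sketch, assuming the obvious java.io imports and the HadoopLongCounter subclass from
the diff below; group/name strings and the value are illustrative:

    HadoopLongCounter cntr = new HadoopLongCounter("SYSTEM", "BYTES");

    cntr.increment(42);

    ByteArrayOutputStream bos = new ByteArrayOutputStream();

    try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
        out.writeObject(cntr); // writeExternal(): group, name, then writeValue().
    }

    try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
        HadoopLongCounter cp = (HadoopLongCounter)in.readObject(); // readExternal() mirrors it.

        assert cp.value() == 42 && "BYTES".equals(cp.name());
    }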

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCountersImpl.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCountersImpl.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCountersImpl.java
new file mode 100644
index 0000000..f3b5463
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCountersImpl.java
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.counter;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.lang.reflect.Constructor;
+import java.util.Collection;
+import java.util.concurrent.ConcurrentMap;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.internal.util.lang.GridTuple3;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.jsr166.ConcurrentHashMap8;
+
+/**
+ * Default in-memory counters store.
+ */
+public class HadoopCountersImpl implements HadoopCounters, Externalizable {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private final ConcurrentMap<CounterKey, HadoopCounter> cntrsMap = new ConcurrentHashMap8<>();
+
+    /**
+     * Default constructor. Creates new instance without counters.
+     */
+    public HadoopCountersImpl() {
+        // No-op.
+    }
+
+    /**
+     * Creates a new instance that contains the given counters.
+     *
+     * @param cntrs Counters to store.
+     */
+    public HadoopCountersImpl(Iterable<HadoopCounter> cntrs) {
+        addCounters(cntrs, true);
+    }
+
+    /**
+     * Copy constructor.
+     *
+     * @param cntrs Counters to copy.
+     */
+    public HadoopCountersImpl(HadoopCounters cntrs) {
+        this(cntrs.all());
+    }
+
+    /**
+     * Creates counter instance.
+     *
+     * @param cls Class of the counter.
+     * @param grp Group name.
+     * @param name Counter name.
+     * @return Counter.
+     */
+    private <T extends HadoopCounter> T createCounter(Class<? extends HadoopCounter> cls, String grp,
+        String name) {
+        try {
+            Constructor constructor = cls.getConstructor(String.class, String.class);
+
+            return (T)constructor.newInstance(grp, name);
+        }
+        catch (Exception e) {
+            throw new IgniteException(e);
+        }
+    }
+
+    /**
+     * Adds a collection of counters to the existing counters.
+     *
+     * @param cntrs Counters to add.
+     * @param cp Whether to copy counters or not.
+     */
+    private void addCounters(Iterable<HadoopCounter> cntrs, boolean cp) {
+        assert cntrs != null;
+
+        for (HadoopCounter cntr : cntrs) {
+            if (cp) {
+                HadoopCounter cntrCp = createCounter(cntr.getClass(), cntr.group(), cntr.name());
+
+                cntrCp.merge(cntr);
+
+                cntr = cntrCp;
+            }
+
+            cntrsMap.put(new CounterKey(cntr.getClass(), cntr.group(), cntr.name()), cntr);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public <T extends HadoopCounter> T counter(String grp, String name, Class<T> cls) {
+        assert cls != null;
+
+        CounterKey mapKey = new CounterKey(cls, grp, name);
+
+        T cntr = (T)cntrsMap.get(mapKey);
+
+        if (cntr == null) {
+            cntr = createCounter(cls, grp, name);
+
+            T old = (T)cntrsMap.putIfAbsent(mapKey, cntr);
+
+            if (old != null)
+                return old;
+        }
+
+        return cntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Collection<HadoopCounter> all() {
+        return cntrsMap.values();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void merge(HadoopCounters other) {
+        for (HadoopCounter counter : other.all())
+            counter(counter.group(), counter.name(), counter.getClass()).merge(counter);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeExternal(ObjectOutput out) throws IOException {
+        U.writeCollection(out, cntrsMap.values());
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("unchecked")
+    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        addCounters(U.<HadoopCounter>readCollection(in), false);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object o) {
+        if (this == o)
+            return true;
+
+        if (o == null || getClass() != o.getClass())
+            return false;
+
+        HadoopCountersImpl counters = (HadoopCountersImpl)o;
+
+        return cntrsMap.equals(counters.cntrsMap);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        return cntrsMap.hashCode();
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopCountersImpl.class, this, "counters", cntrsMap.values());
+    }
+
+    /**
+     * The tuple of counter identifier components for more readable code.
+     */
+    private static class CounterKey extends GridTuple3<Class<? extends HadoopCounter>, String, String> {
+        /** */
+        private static final long serialVersionUID = 0L;
+
+        /**
+         * Constructor.
+         *
+         * @param cls Class of the counter.
+         * @param grp Group name.
+         * @param name Counter name.
+         */
+        private CounterKey(Class<? extends HadoopCounter> cls, String grp, String name) {
+            super(cls, grp, name);
+        }
+
+        /**
+         * Empty constructor required by {@link Externalizable}.
+         */
+        public CounterKey() {
+            // No-op.
+        }
+    }
+}
\ No newline at end of file
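
For reference, a minimal usage sketch of the store above (the group/name strings and values are
illustrative). Note that counter() instantiates the counter reflectively on first access, which
is why every HadoopCounter implementation must expose a public (group, name) constructor:

    HadoopCounters cntrs = new HadoopCountersImpl();

    // Created lazily via the (String, String) constructor on first access.
    HadoopLongCounter written = cntrs.counter("fs", "bytesWritten", HadoopLongCounter.class);

    written.increment(1024);

    // The copy constructor deep-copies counters, so merging a copy doubles the value here.
    cntrs.merge(new HadoopCountersImpl(cntrs));

    assert written.value() == 2048;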

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopLongCounter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopLongCounter.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopLongCounter.java
new file mode 100644
index 0000000..0d61e0d
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopLongCounter.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.counter;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+
+/**
+ * Standard hadoop counter to use via original Hadoop API in Hadoop jobs.
+ */
+public class HadoopLongCounter extends HadoopCounterAdapter {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** The counter value. */
+    private long val;
+
+    /**
+     * Default constructor required by {@link Externalizable}.
+     */
+    public HadoopLongCounter() {
+        // No-op.
+    }
+
+    /**
+     * Constructor.
+     *
+     * @param grp Group name.
+     * @param name Counter name.
+     */
+    public HadoopLongCounter(String grp, String name) {
+        super(grp, name);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void writeValue(ObjectOutput out) throws IOException {
+        out.writeLong(val);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void readValue(ObjectInput in) throws IOException {
+        val = in.readLong();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void merge(HadoopCounter cntr) {
+        val += ((HadoopLongCounter)cntr).val;
+    }
+
+    /**
+     * Gets current value of this counter.
+     *
+     * @return Current value.
+     */
+    public long value() {
+        return val;
+    }
+
+    /**
+     * Sets current value by the given value.
+     *
+     * @param val Value to set.
+     */
+    public void value(long val) {
+        this.val = val;
+    }
+
+    /**
+     * Increment this counter by the given value.
+     *
+     * @param i Value to increase this counter by.
+     */
+    public void increment(long i) {
+        val += i;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopPerformanceCounter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopPerformanceCounter.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopPerformanceCounter.java
new file mode 100644
index 0000000..dedc6b3
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopPerformanceCounter.java
@@ -0,0 +1,288 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.counter;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.UUID;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskType;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.T2;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.jetbrains.annotations.Nullable;
+
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.JOB_SUBMISSION_START_TS_PROPERTY;
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.REQ_NEW_JOBID_TS_PROPERTY;
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.RESPONSE_NEW_JOBID_TS_PROPERTY;
+
+/**
+ * Counter for the job statistics accumulation.
+ */
+public class HadoopPerformanceCounter extends HadoopCounterAdapter {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** The group name for this counter. */
+    private static final String GROUP_NAME = "SYSTEM";
+
+    /** The counter name for this counter. */
+    private static final String COUNTER_NAME = "PERFORMANCE";
+
+    /** Events collection. */
+    private Collection<T2<String, Long>> evts = new ArrayList<>();
+
+    /** Node id to insert into the event info. */
+    private UUID nodeId;
+
+    /** */
+    private int reducerNum;
+
+    /** */
+    private volatile Long firstShuffleMsg;
+
+    /** */
+    private volatile Long lastShuffleMsg;
+
+    /**
+     * Default constructor required by {@link Externalizable}.
+     */
+    public HadoopPerformanceCounter() {
+        // No-op.
+    }
+
+    /**
+     * Constructor.
+     *
+     * @param grp Group name.
+     * @param name Counter name.
+     */
+    public HadoopPerformanceCounter(String grp, String name) {
+        super(grp, name);
+    }
+
+    /**
+     * Constructor to create an instance that is used as a helper.
+     *
+     * @param nodeId ID of the worker node.
+     */
+    public HadoopPerformanceCounter(UUID nodeId) {
+        this.nodeId = nodeId;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void writeValue(ObjectOutput out) throws IOException {
+        U.writeCollection(out, evts);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void readValue(ObjectInput in) throws IOException {
+        try {
+            evts = U.readCollection(in);
+        }
+        catch (ClassNotFoundException e) {
+            throw new IOException(e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void merge(HadoopCounter cntr) {
+        evts.addAll(((HadoopPerformanceCounter)cntr).evts);
+    }
+
+    /**
+     * Gets the events collection.
+     *
+     * @return Collection of events.
+     */
+    public Collection<T2<String, Long>> evts() {
+        return evts;
+    }
+
+    /**
+     * Generates a name that encodes the event information.
+     *
+     * @param info Task info.
+     * @param evtType The type of the event.
+     * @return String containing the event information.
+     */
+    private String eventName(HadoopTaskInfo info, String evtType) {
+        return eventName(info.type().toString(), info.taskNumber(), evtType);
+    }
+
+    /**
+     * Generates a name that encodes the event information.
+     *
+     * @param taskType Task type.
+     * @param taskNum Number of the task.
+     * @param evtType The type of the event.
+     * @return String containing the event information.
+     */
+    private String eventName(String taskType, int taskNum, String evtType) {
+        assert nodeId != null;
+
+        return taskType + " " + taskNum + " " + evtType + " " + nodeId;
+    }
+
+    /**
+     * Adds event of the task submission (task instance creation).
+     *
+     * @param info Task info.
+     * @param ts Timestamp of the event.
+     */
+    public void onTaskSubmit(HadoopTaskInfo info, long ts) {
+        evts.add(new T2<>(eventName(info, "submit"), ts));
+    }
+
+    /**
+     * Adds event of the task preparation.
+     *
+     * @param info Task info.
+     * @param ts Timestamp of the event.
+     */
+    public void onTaskPrepare(HadoopTaskInfo info, long ts) {
+        evts.add(new T2<>(eventName(info, "prepare"), ts));
+    }
+
+    /**
+     * Adds event of the task finish.
+     *
+     * @param info Task info.
+     * @param ts Timestamp of the event.
+     */
+    public void onTaskFinish(HadoopTaskInfo info, long ts) {
+        if (info.type() == HadoopTaskType.REDUCE && lastShuffleMsg != null) {
+            evts.add(new T2<>(eventName("SHUFFLE", reducerNum, "start"), firstShuffleMsg));
+            evts.add(new T2<>(eventName("SHUFFLE", reducerNum, "finish"), lastShuffleMsg));
+
+            lastShuffleMsg = null;
+        }
+
+        evts.add(new T2<>(eventName(info, "finish"), ts));
+    }
+
+    /**
+     * Adds event of the task run.
+     *
+     * @param info Task info.
+     * @param ts Timestamp of the event.
+     */
+    public void onTaskStart(HadoopTaskInfo info, long ts) {
+        evts.add(new T2<>(eventName(info, "start"), ts));
+    }
+
+    /**
+     * Adds event of the job preparation.
+     *
+     * @param ts Timestamp of the event.
+     */
+    public void onJobPrepare(long ts) {
+        assert nodeId != null;
+
+        evts.add(new T2<>("JOB prepare " + nodeId, ts));
+    }
+
+    /**
+     * Adds event of the job start.
+     *
+     * @param ts Timestamp of the event.
+     */
+    public void onJobStart(long ts) {
+        assert nodeId != null;
+
+        evts.add(new T2<>("JOB start " + nodeId, ts));
+    }
+
+    /**
+     * Adds client submission events from job info.
+     *
+     * @param info Job info.
+     */
+    public void clientSubmissionEvents(HadoopJobInfo info) {
+        assert nodeId != null;
+
+        addEventFromProperty("JOB requestId", info, REQ_NEW_JOBID_TS_PROPERTY);
+        addEventFromProperty("JOB responseId", info, RESPONSE_NEW_JOBID_TS_PROPERTY);
+        addEventFromProperty("JOB submit", info, JOB_SUBMISSION_START_TS_PROPERTY);
+    }
+
+    /**
+     * Adds event with timestamp from some property in job info.
+     *
+     * @param evt Event type and phase.
+     * @param info Job info.
+     * @param propName Property name to get timestamp.
+     */
+    private void addEventFromProperty(String evt, HadoopJobInfo info, String propName) {
+        String val = info.property(propName);
+
+        if (!F.isEmpty(val)) {
+            try {
+                evts.add(new T2<>(evt + " " + nodeId, Long.parseLong(val)));
+            }
+            catch (NumberFormatException e) {
+                throw new IllegalStateException("Invalid value '" + val + "' of property '" + propName + "'", e);
+            }
+        }
+    }
+
+    /**
+     * Registers shuffle message event.
+     *
+     * @param reducerNum Number of reducer that receives the data.
+     * @param ts Timestamp of the event.
+     */
+    public void onShuffleMessage(int reducerNum, long ts) {
+        this.reducerNum = reducerNum;
+
+        if (firstShuffleMsg == null)
+            firstShuffleMsg = ts;
+
+        lastShuffleMsg = ts;
+    }
+
+    /**
+     * Gets the predefined system performance counter from the HadoopCounters object.
+     *
+     * @param cntrs HadoopCounters object.
+     * @param nodeId Node id for the methods that add events. May be null if those methods are not used.
+     * @return Predefined performance counter.
+     */
+    public static HadoopPerformanceCounter getCounter(HadoopCounters cntrs, @Nullable UUID nodeId) {
+        HadoopPerformanceCounter cntr = cntrs.counter(GROUP_NAME, COUNTER_NAME, HadoopPerformanceCounter.class);
+
+        if (nodeId != null)
+            cntr.nodeId(nodeId);
+
+        return cntr;
+    }
+
+    /**
+     * Sets the nodeId field.
+     *
+     * @param nodeId Node id.
+     */
+    private void nodeId(UUID nodeId) {
+        this.nodeId = nodeId;
+    }
+}
\ No newline at end of file
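
For reference, a minimal sketch of recording and reading events with the counter above (the node
id is illustrative; timestamps would normally come from the job lifecycle, not the wall clock at
the call site):

    HadoopCounters cntrs = new HadoopCountersImpl();

    // A node id is required by the event-recording methods.
    HadoopPerformanceCounter perf = HadoopPerformanceCounter.getCounter(cntrs, UUID.randomUUID());

    perf.onJobPrepare(System.currentTimeMillis());
    perf.onJobStart(System.currentTimeMillis());

    // Shuffle messages collapse into one start/finish pair per reducer,
    // flushed when the corresponding reduce task finishes.
    perf.onShuffleMessage(0, System.currentTimeMillis());

    for (T2<String, Long> evt : perf.evts())
        System.out.println(evt.get1() + " -> " + evt.get2());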

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemCacheUtils.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemCacheUtils.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemCacheUtils.java
new file mode 100644
index 0000000..1ecbee5
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemCacheUtils.java
@@ -0,0 +1,242 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.fs;
+
+import java.io.IOException;
+import java.net.URI;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
+import org.apache.ignite.internal.util.GridStringBuilder;
+import org.apache.ignite.internal.util.typedef.F;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * File system cache utility methods used by Map-Reduce tasks and jobs.
+ */
+public class HadoopFileSystemCacheUtils {
+    /**
+     * A common static factory method. Creates a new HadoopLazyConcurrentMap.
+     * @return A new HadoopLazyConcurrentMap.
+     */
+    public static HadoopLazyConcurrentMap<FsCacheKey, FileSystem> createHadoopLazyConcurrentMap() {
+        return new HadoopLazyConcurrentMap<>(
+            new HadoopLazyConcurrentMap.ValueFactory<FsCacheKey, FileSystem>() {
+                @Override public FileSystem createValue(FsCacheKey key) throws IOException {
+                    try {
+                        assert key != null;
+
+                        // Explicitly disable FileSystem caching:
+                        URI uri = key.uri();
+
+                        String scheme = uri.getScheme();
+
+                        // Copy the configuration to avoid altering the external object.
+                        Configuration cfg = new Configuration(key.configuration());
+
+                        String prop = HadoopFileSystemsUtils.disableFsCachePropertyName(scheme);
+
+                        cfg.setBoolean(prop, true);
+
+                        return FileSystem.get(uri, cfg, key.user());
+                    }
+                    catch (InterruptedException e) {
+                        Thread.currentThread().interrupt();
+
+                        throw new IOException("Failed to create file system due to interrupt.", e);
+                    }
+                }
+            }
+        );
+    }
+
+    /**
+     * Gets non-null user name as per the Hadoop viewpoint.
+     * @param cfg the Hadoop job configuration, may be null.
+     * @return the user name, never null.
+     */
+    private static String getMrHadoopUser(Configuration cfg) throws IOException {
+        String user = cfg.get(MRJobConfig.USER_NAME);
+
+        if (user == null)
+            user = IgniteHadoopFileSystem.getFsHadoopUser();
+
+        return user;
+    }
+
+    /**
+     * Common method to get the V1 file system in the MapRed engine.
+     * It gets the file system for the user specified in the
+     * configuration with the {@link MRJobConfig#USER_NAME} property.
+     * The file systems are created and cached in the given map upon first request.
+     *
+     * @param uri The file system URI.
+     * @param cfg The configuration.
+     * @param map The caching map.
+     * @return The file system.
+     * @throws IOException On error.
+     */
+    public static FileSystem fileSystemForMrUserWithCaching(@Nullable URI uri, Configuration cfg,
+        HadoopLazyConcurrentMap<FsCacheKey, FileSystem> map)
+            throws IOException {
+        assert map != null;
+        assert cfg != null;
+
+        final String usr = getMrHadoopUser(cfg);
+
+        assert usr != null;
+
+        if (uri == null)
+            uri = FileSystem.getDefaultUri(cfg);
+
+        final FileSystem fs;
+
+        try {
+            final FsCacheKey key = new FsCacheKey(uri, usr, cfg);
+
+            fs = map.getOrCreate(key);
+        }
+        catch (IgniteException ie) {
+            throw new IOException(ie);
+        }
+
+        assert fs != null;
+        assert !(fs instanceof IgniteHadoopFileSystem) || F.eq(usr, ((IgniteHadoopFileSystem)fs).user());
+
+        return fs;
+    }
+
+    /**
+     * Fixes the file system URI using logic similar to that of {@code FileSystem#get(URI, Configuration, String)}.
+     * @param uri0 The URI.
+     * @param cfg The configuration.
+     * @return Corrected URI.
+     */
+    private static URI fixUri(URI uri0, Configuration cfg) {
+        if (uri0 == null)
+            return FileSystem.getDefaultUri(cfg);
+
+        String scheme = uri0.getScheme();
+        String authority = uri0.getAuthority();
+
+        if (authority == null) {
+            URI dfltUri = FileSystem.getDefaultUri(cfg);
+
+            if (scheme == null || (scheme.equals(dfltUri.getScheme()) && dfltUri.getAuthority() != null))
+                return dfltUri;
+        }
+
+        return uri0;
+    }
+
+    /**
+     * Note that configuration is not a part of the key.
+     * It is used solely to initialize the first instance
+     * that is created for the key.
+     */
+    public static final class FsCacheKey {
+        /** */
+        private final URI uri;
+
+        /** */
+        private final String usr;
+
+        /** */
+        private final String equalityKey;
+
+        /** */
+        private final Configuration cfg;
+
+        /**
+         * Constructor.
+         *
+         * @param uri The file system URI.
+         * @param usr The user name.
+         * @param cfg The configuration.
+         */
+        public FsCacheKey(URI uri, String usr, Configuration cfg) {
+            assert uri != null;
+            assert usr != null;
+            assert cfg != null;
+
+            this.uri = fixUri(uri, cfg);
+            this.usr = usr;
+            this.cfg = cfg;
+
+            this.equalityKey = createEqualityKey();
+        }
+
+        /**
+         * Creates String key used for equality and hashing.
+         */
+        private String createEqualityKey() {
+            GridStringBuilder sb = new GridStringBuilder("(").a(usr).a(")@");
+
+            if (uri.getScheme() != null)
+                sb.a(uri.getScheme().toLowerCase());
+
+            sb.a("://");
+
+            if (uri.getAuthority() != null)
+                sb.a(uri.getAuthority().toLowerCase());
+
+            return sb.toString();
+        }
+
+        /**
+         * The URI.
+         */
+        public URI uri() {
+            return uri;
+        }
+
+        /**
+         * The User.
+         */
+        public String user() {
+            return usr;
+        }
+
+        /**
+         * The Configuration.
+         */
+        public Configuration configuration() {
+            return cfg;
+        }
+
+        /** {@inheritDoc} */
+        @SuppressWarnings("SimplifiableIfStatement")
+        @Override public boolean equals(Object obj) {
+            if (obj == this)
+                return true;
+
+            if (obj == null || getClass() != obj.getClass())
+                return false;
+
+            return equalityKey.equals(((FsCacheKey)obj).equalityKey);
+        }
+
+        /** {@inheritDoc} */
+        @Override public int hashCode() {
+            return equalityKey.hashCode();
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return equalityKey;
+        }
+    }
+}
\ No newline at end of file
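
For reference, a minimal usage sketch of the utilities above (the HDFS URI is illustrative):

    HadoopLazyConcurrentMap<HadoopFileSystemCacheUtils.FsCacheKey, FileSystem> fsMap =
        HadoopFileSystemCacheUtils.createHadoopLazyConcurrentMap();

    Configuration cfg = new Configuration();

    // Resolves the user from MRJobConfig.USER_NAME (or the Ignite default) and caches
    // one FileSystem instance per (user, scheme, authority) equality key.
    FileSystem fs = HadoopFileSystemCacheUtils.fileSystemForMrUserWithCaching(
        URI.create("hdfs://namenode:9000/"), cfg, fsMap);

    // ... use fs ...

    fsMap.close(); // Closes every cached file system.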

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemsUtils.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemsUtils.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemsUtils.java
new file mode 100644
index 0000000..68c0dc4
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemsUtils.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.fs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FsConstants;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Utilities for configuring file systems to support the separate working directory per each thread.
+ */
+public class HadoopFileSystemsUtils {
+    /** Name of the property for setting working directory on create new local FS instance. */
+    public static final String LOC_FS_WORK_DIR_PROP = "fs." + FsConstants.LOCAL_FS_URI.getScheme() + ".workDir";
+
+    /**
+     * Setup wrappers of filesystems to support the separate working directory.
+     *
+     * @param cfg Config for setup.
+     */
+    public static void setupFileSystems(Configuration cfg) {
+        cfg.set("fs." + FsConstants.LOCAL_FS_URI.getScheme() + ".impl", HadoopLocalFileSystemV1.class.getName());
+        cfg.set("fs.AbstractFileSystem." + FsConstants.LOCAL_FS_URI.getScheme() + ".impl",
+                HadoopLocalFileSystemV2.class.getName());
+    }
+
+    /**
+     * Gets the property name to disable file system cache.
+     * @param scheme The file system URI scheme.
+     * @return The property name. If scheme is null,
+     * returns "fs.null.impl.disable.cache".
+     */
+    public static String disableFsCachePropertyName(@Nullable String scheme) {
+        return String.format("fs.%s.impl.disable.cache", scheme);
+    }
+}
\ No newline at end of file
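
For reference, a short sketch of how these helpers are used (the "hdfs" scheme is illustrative):

    Configuration cfg = new Configuration();

    // Install the per-thread working directory wrappers for the local file system.
    HadoopFileSystemsUtils.setupFileSystems(cfg);

    // Yields "fs.hdfs.impl.disable.cache" and disables Hadoop's own FS caching for the scheme.
    cfg.setBoolean(HadoopFileSystemsUtils.disableFsCachePropertyName("hdfs"), true);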

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLazyConcurrentMap.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLazyConcurrentMap.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLazyConcurrentMap.java
new file mode 100644
index 0000000..681cddb
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLazyConcurrentMap.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.fs;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.jsr166.ConcurrentHashMap8;
+
+/**
+ * Maps values by keys.
+ * Values are created lazily using {@link ValueFactory}.
+ *
+ * Despite the name, it does not depend on any Hadoop classes.
+ */
+public class HadoopLazyConcurrentMap<K, V extends Closeable> {
+    /** The map storing the actual values. */
+    private final ConcurrentMap<K, ValueWrapper> map = new ConcurrentHashMap8<>();
+
+    /** The factory passed in by the client. Will be used for lazy value creation. */
+    private final ValueFactory<K, V> factory;
+
+    /** Lock used to close the objects. */
+    private final ReadWriteLock closeLock = new ReentrantReadWriteLock();
+
+    /** Flag indicating that this map is closed and cleared. */
+    private boolean closed;
+
+    /**
+     * Constructor.
+     * @param factory the factory to create new values lazily.
+     */
+    public HadoopLazyConcurrentMap(ValueFactory<K, V> factory) {
+        this.factory = factory;
+
+        assert getClass().getClassLoader() == Ignite.class.getClassLoader();
+    }
+
+    /**
+     * Gets cached or creates a new value of V.
+     * Never returns null.
+     * @param k the key to associate the value with.
+     * @return the cached or newly created value, never null.
+     * @throws IgniteException on error
+     */
+    public V getOrCreate(K k) {
+        ValueWrapper w = map.get(k);
+
+        if (w == null) {
+            closeLock.readLock().lock();
+
+            try {
+                if (closed)
+                    throw new IllegalStateException("Failed to create value for key [" + k
+                        + "]: the map is already closed.");
+
+                final ValueWrapper wNew = new ValueWrapper(k);
+
+                w = map.putIfAbsent(k, wNew);
+
+                if (w == null) {
+                    wNew.init();
+
+                    w = wNew;
+                }
+            }
+            finally {
+                closeLock.readLock().unlock();
+            }
+        }
+
+        try {
+            V v = w.getValue();
+
+            assert v != null;
+
+            return v;
+        }
+        catch (IgniteCheckedException ie) {
+            throw new IgniteException(ie);
+        }
+    }
+
+    /**
+     * Clears the map and closes all the values.
+     *
+     * @throws IgniteCheckedException If failed to close one or more values.
+     */
+    public void close() throws IgniteCheckedException {
+        closeLock.writeLock().lock();
+
+        try {
+            if (closed)
+                return;
+
+            closed = true;
+
+            Exception err = null;
+
+            Set<K> keySet = map.keySet();
+
+            for (K key : keySet) {
+                V v = null;
+
+                try {
+                    v = map.get(key).getValue();
+                }
+                catch (IgniteCheckedException ignore) {
+                    // No-op.
+                }
+
+                if (v != null) {
+                    try {
+                        v.close();
+                    }
+                    catch (Exception err0) {
+                        if (err == null)
+                            err = err0;
+                    }
+                }
+            }
+
+            map.clear();
+
+            if (err != null)
+                throw new IgniteCheckedException(err);
+        }
+        finally {
+            closeLock.writeLock().unlock();
+        }
+    }
+
+    /**
+     * Helper class that drives the lazy value creation.
+     */
+    private class ValueWrapper {
+        /** Future. */
+        private final GridFutureAdapter<V> fut = new GridFutureAdapter<>();
+
+        /** The key. */
+        private final K key;
+
+        /**
+         * Creates new wrapper.
+         *
+         * @param key The key.
+         */
+        private ValueWrapper(K key) {
+            this.key = key;
+        }
+
+        /**
+         * Initializes the value using the factory.
+         */
+        private void init() {
+            try {
+                final V v0 = factory.createValue(key);
+
+                if (v0 == null)
+                    throw new IgniteException("Failed to create non-null value. [key=" + key + ']');
+
+                fut.onDone(v0);
+            }
+            catch (Throwable e) {
+                fut.onDone(e);
+            }
+        }
+
+        /**
+         * Gets the available value or blocks until the value is initialized.
+         * @return the value, never null.
+         * @throws IgniteCheckedException on error.
+         */
+        V getValue() throws IgniteCheckedException {
+            return fut.get();
+        }
+    }
+
+    /**
+     * Interface representing the factory that creates map values.
+     * @param <K> the type of the key.
+     * @param <V> the type of the value.
+     */
+    public interface ValueFactory<K, V> {
+        /**
+         * Creates the new value. Should never return null.
+         *
+         * @param key the key to create value for
+         * @return the value.
+         * @throws IOException On failure.
+         */
+        public V createValue(K key) throws IOException;
+    }
+}
\ No newline at end of file
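
For reference, a minimal sketch of the map above with a trivial factory (the host and port are
illustrative; any Closeable value type works):

    HadoopLazyConcurrentMap<String, Socket> sockets = new HadoopLazyConcurrentMap<>(
        new HadoopLazyConcurrentMap.ValueFactory<String, Socket>() {
            @Override public Socket createValue(String host) throws IOException {
                return new Socket(host, 8080); // Invoked at most once per key.
            }
        });

    // Concurrent callers for the same key block on one future and receive the same instance.
    Socket sock = sockets.getOrCreate("example.org");

    sockets.close(); // Closes all created values; subsequent getOrCreate() calls fail.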

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV1.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV1.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV1.java
new file mode 100644
index 0000000..cbb007f
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV1.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.fs;
+
+import java.io.File;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * Local file system replacement for Hadoop jobs.
+ */
+public class HadoopLocalFileSystemV1 extends LocalFileSystem {
+    /**
+     * Creates new local file system.
+     */
+    public HadoopLocalFileSystemV1() {
+        super(new HadoopRawLocalFileSystem());
+    }
+
+    /** {@inheritDoc} */
+    @Override public File pathToFile(Path path) {
+        return ((HadoopRawLocalFileSystem)getRaw()).convert(path);
+    }
+}
\ No newline at end of file


http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolSubmitJobTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolSubmitJobTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolSubmitJobTask.java
deleted file mode 100644
index 3eb819b..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolSubmitJobTask.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.proto;
-
-import java.util.UUID;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.compute.ComputeJobContext;
-import org.apache.ignite.internal.processors.hadoop.Hadoop;
-import org.apache.ignite.internal.processors.hadoop.HadoopDefaultJobInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobStatus;
-
-import static org.apache.ignite.internal.processors.hadoop.HadoopJobPhase.PHASE_CANCELLING;
-
-/**
- * Submit job task.
- */
-public class HadoopProtocolSubmitJobTask extends HadoopProtocolTaskAdapter<HadoopJobStatus> {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** {@inheritDoc} */
-    @Override public HadoopJobStatus run(ComputeJobContext jobCtx, Hadoop hadoop,
-        HadoopProtocolTaskArguments args) throws IgniteCheckedException {
-        UUID nodeId = UUID.fromString(args.<String>get(0));
-        Integer id = args.get(1);
-        HadoopDefaultJobInfo info = args.get(2);
-
-        assert nodeId != null;
-        assert id != null;
-        assert info != null;
-
-        HadoopJobId jobId = new HadoopJobId(nodeId, id);
-
-        hadoop.submit(jobId, info);
-
-        HadoopJobStatus res = hadoop.status(jobId);
-
-        if (res == null) // Submission failed.
-            res = new HadoopJobStatus(jobId, info.jobName(), info.user(), 0, 0, 0, 0, PHASE_CANCELLING, true, 1);
-
-        return res;
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolTaskAdapter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolTaskAdapter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolTaskAdapter.java
deleted file mode 100644
index c3227ae..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolTaskAdapter.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.proto;
-
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.cluster.ClusterNode;
-import org.apache.ignite.compute.ComputeJob;
-import org.apache.ignite.compute.ComputeJobContext;
-import org.apache.ignite.compute.ComputeJobResult;
-import org.apache.ignite.compute.ComputeJobResultPolicy;
-import org.apache.ignite.compute.ComputeTask;
-import org.apache.ignite.internal.IgniteEx;
-import org.apache.ignite.internal.processors.hadoop.Hadoop;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.resources.IgniteInstanceResource;
-import org.apache.ignite.resources.JobContextResource;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Hadoop protocol task adapter.
- */
-public abstract class HadoopProtocolTaskAdapter<R> implements ComputeTask<HadoopProtocolTaskArguments, R> {
-    /** {@inheritDoc} */
-    @Nullable @Override public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid,
-        @Nullable HadoopProtocolTaskArguments arg) {
-        return Collections.singletonMap(new Job(arg), subgrid.get(0));
-    }
-
-    /** {@inheritDoc} */
-    @Override public ComputeJobResultPolicy result(ComputeJobResult res, List<ComputeJobResult> rcvd) {
-        return ComputeJobResultPolicy.REDUCE;
-    }
-
-    /** {@inheritDoc} */
-    @Nullable @Override public R reduce(List<ComputeJobResult> results) {
-        if (!F.isEmpty(results)) {
-            ComputeJobResult res = results.get(0);
-
-            return res.getData();
-        }
-        else
-            return null;
-    }
-
-    /**
-     * Job wrapper.
-     */
-    private class Job implements ComputeJob {
-        /** */
-        private static final long serialVersionUID = 0L;
-
-        /** */
-        @IgniteInstanceResource
-        private Ignite ignite;
-
-        /** */
-        @SuppressWarnings("UnusedDeclaration")
-        @JobContextResource
-        private ComputeJobContext jobCtx;
-
-        /** Arguments. */
-        private final HadoopProtocolTaskArguments args;
-
-        /**
-         * Constructor.
-         *
-         * @param args Job arguments.
-         */
-        private Job(HadoopProtocolTaskArguments args) {
-            this.args = args;
-        }
-
-        /** {@inheritDoc} */
-        @Override public void cancel() {
-            // No-op.
-        }
-
-        /** {@inheritDoc} */
-        @Nullable @Override public Object execute() {
-            try {
-                return run(jobCtx, ((IgniteEx)ignite).hadoop(), args);
-            }
-            catch (IgniteCheckedException e) {
-                throw U.convertException(e);
-            }
-        }
-    }
-
-    /**
-     * Run the task.
-     *
-     * @param jobCtx Job context.
-     * @param hadoop Hadoop facade.
-     * @param args Arguments.
-     * @return Job result.
-     * @throws IgniteCheckedException If failed.
-     */
-    public abstract R run(ComputeJobContext jobCtx, Hadoop hadoop, HadoopProtocolTaskArguments args)
-        throws IgniteCheckedException;
-}
\ No newline at end of file

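A concrete protocol task only needs to implement run(); map(), result() and reduce() are inherited from the adapter. A minimal sketch of such a subclass (hypothetical class name; import paths assumed to follow the module layout, and hadoop.status() as used in the submit task earlier in this commit):

    import java.util.UUID;
    import org.apache.ignite.IgniteCheckedException;
    import org.apache.ignite.compute.ComputeJobContext;
    import org.apache.ignite.internal.processors.hadoop.Hadoop;
    import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
    import org.apache.ignite.internal.processors.hadoop.HadoopJobStatus;

    /** Hypothetical task: runs on one node and reduces to the first (only) result. */
    public class HadoopProtocolStatusSketchTask extends HadoopProtocolTaskAdapter<HadoopJobStatus> {
        @Override public HadoopJobStatus run(ComputeJobContext jobCtx, Hadoop hadoop,
            HadoopProtocolTaskArguments args) throws IgniteCheckedException {
            // Job ID is packed as (node ID string, counter), as in the submit task above.
            HadoopJobId jobId = new HadoopJobId(UUID.fromString(args.<String>get(0)), args.<Integer>get(1));

            return hadoop.status(jobId);
        }
    }
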
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolTaskArguments.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolTaskArguments.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolTaskArguments.java
deleted file mode 100644
index e497454..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolTaskArguments.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.proto;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Task arguments.
- */
-public class HadoopProtocolTaskArguments implements Externalizable {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** Arguments. */
-    private Object[] args;
-
-    /**
-     * {@link Externalizable} support.
-     */
-    public HadoopProtocolTaskArguments() {
-        // No-op.
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param args Arguments.
-     */
-    public HadoopProtocolTaskArguments(Object... args) {
-        this.args = args;
-    }
-
-    /**
-     * @param idx Argument index.
-     * @return Argument at the given index or {@code null} if the index is out of range.
-     */
-    @SuppressWarnings("unchecked")
-    @Nullable public <T> T get(int idx) {
-        return (args != null && args.length > idx) ? (T)args[idx] : null;
-    }
-
-    /**
-     * @return Number of arguments.
-     */
-    public int size() {
-        return args != null ? args.length : 0;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        U.writeArray(out, args);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        args = U.readArray(in);
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopProtocolTaskArguments.class, this);
-    }
-}
\ No newline at end of file

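A short usage sketch (illustrative values): get() is index-tolerant and returns null instead of throwing, so optional trailing arguments can simply be omitted by the caller.

    static void demo() {
        HadoopProtocolTaskArguments args = new HadoopProtocolTaskArguments("node-1", 42);

        String first = args.get(0);   // "node-1" (the target type drives the unchecked cast).
        Integer second = args.get(1); // 42.
        Object missing = args.get(5); // null: an index beyond size() is tolerated.

        assert args.size() == 2;
    }
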
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffle.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffle.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffle.java
deleted file mode 100644
index 769bdc4..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffle.java
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.shuffle;
-
-import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.cluster.ClusterNode;
-import org.apache.ignite.internal.GridTopic;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.processors.hadoop.HadoopComponent;
-import org.apache.ignite.internal.processors.hadoop.HadoopContext;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-import org.apache.ignite.internal.processors.hadoop.HadoopMapReducePlan;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskOutput;
-import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
-import org.apache.ignite.internal.util.future.GridFinishedFuture;
-import org.apache.ignite.internal.util.lang.IgniteInClosure2X;
-import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lang.IgniteBiPredicate;
-
-/**
- * Shuffle.
- */
-public class HadoopShuffle extends HadoopComponent {
-    /** */
-    private final ConcurrentMap<HadoopJobId, HadoopShuffleJob<UUID>> jobs = new ConcurrentHashMap<>();
-
-    /** */
-    protected final GridUnsafeMemory mem = new GridUnsafeMemory(0);
-
-    /** {@inheritDoc} */
-    @Override public void start(HadoopContext ctx) throws IgniteCheckedException {
-        super.start(ctx);
-
-        ctx.kernalContext().io().addUserMessageListener(GridTopic.TOPIC_HADOOP,
-            new IgniteBiPredicate<UUID, Object>() {
-                @Override public boolean apply(UUID nodeId, Object msg) {
-                    return onMessageReceived(nodeId, (HadoopMessage)msg);
-                }
-            });
-    }
-
-    /**
-     * Stops shuffle.
-     *
-     * @param cancel Whether to cancel all ongoing activities.
-     */
-    @Override public void stop(boolean cancel) {
-        for (HadoopShuffleJob job : jobs.values()) {
-            try {
-                job.close();
-            }
-            catch (IgniteCheckedException e) {
-                U.error(log, "Failed to close job.", e);
-            }
-        }
-
-        jobs.clear();
-    }
-
-    /**
-     * Creates new shuffle job.
-     *
-     * @param jobId Job ID.
-     * @return Created shuffle job.
-     * @throws IgniteCheckedException If job creation failed.
-     */
-    private HadoopShuffleJob<UUID> newJob(HadoopJobId jobId) throws IgniteCheckedException {
-        HadoopMapReducePlan plan = ctx.jobTracker().plan(jobId);
-
-        HadoopShuffleJob<UUID> job = new HadoopShuffleJob<>(ctx.localNodeId(), log,
-            ctx.jobTracker().job(jobId, null), mem, plan.reducers(), plan.reducers(ctx.localNodeId()));
-
-        UUID[] rdcAddrs = new UUID[plan.reducers()];
-
-        for (int i = 0; i < rdcAddrs.length; i++) {
-            UUID nodeId = plan.nodeForReducer(i);
-
-            assert nodeId != null : "Plan is missing node for reducer [plan=" + plan + ", rdc=" + i + ']';
-
-            rdcAddrs[i] = nodeId;
-        }
-
-        boolean init = job.initializeReduceAddresses(rdcAddrs);
-
-        assert init;
-
-        return job;
-    }
-
-    /**
-     * @param nodeId Node ID to send message to.
-     * @param msg Message to send.
-     * @throws IgniteCheckedException If send failed.
-     */
-    private void send0(UUID nodeId, Object msg) throws IgniteCheckedException {
-        ClusterNode node = ctx.kernalContext().discovery().node(nodeId);
-
-        ctx.kernalContext().io().sendUserMessage(F.asList(node), msg, GridTopic.TOPIC_HADOOP, false, 0);
-    }
-
-    /**
-     * @param jobId Job ID.
-     * @return Shuffle job.
-     * @throws IgniteCheckedException If job creation failed.
-     */
-    private HadoopShuffleJob<UUID> job(HadoopJobId jobId) throws IgniteCheckedException {
-        HadoopShuffleJob<UUID> res = jobs.get(jobId);
-
-        if (res == null) {
-            res = newJob(jobId);
-
-            HadoopShuffleJob<UUID> old = jobs.putIfAbsent(jobId, res);
-
-            if (old != null) {
-                res.close();
-
-                res = old;
-            }
-            else if (res.reducersInitialized())
-                startSending(res);
-        }
-
-        return res;
-    }
-
-    /**
-     * Starts message sending thread.
-     *
-     * @param shuffleJob Job to start sending for.
-     */
-    private void startSending(HadoopShuffleJob<UUID> shuffleJob) {
-        shuffleJob.startSending(ctx.kernalContext().gridName(),
-            new IgniteInClosure2X<UUID, HadoopShuffleMessage>() {
-                @Override public void applyx(UUID dest, HadoopShuffleMessage msg) throws IgniteCheckedException {
-                    send0(dest, msg);
-                }
-            }
-        );
-    }
-
-    /**
-     * Message received callback.
-     *
-     * @param src Sender node ID.
-     * @param msg Received message.
-     * @return Always {@code true}: the message is consumed.
-     */
-    public boolean onMessageReceived(UUID src, HadoopMessage msg) {
-        if (msg instanceof HadoopShuffleMessage) {
-            HadoopShuffleMessage m = (HadoopShuffleMessage)msg;
-
-            try {
-                job(m.jobId()).onShuffleMessage(m);
-            }
-            catch (IgniteCheckedException e) {
-                U.error(log, "Message handling failed.", e);
-            }
-
-            try {
-                // Reply with ack.
-                send0(src, new HadoopShuffleAck(m.id(), m.jobId()));
-            }
-            catch (IgniteCheckedException e) {
-                U.error(log, "Failed to reply back to shuffle message sender [snd=" + src + ", msg=" + msg + ']', e);
-            }
-        }
-        else if (msg instanceof HadoopShuffleAck) {
-            HadoopShuffleAck m = (HadoopShuffleAck)msg;
-
-            try {
-                job(m.jobId()).onShuffleAck(m);
-            }
-            catch (IgniteCheckedException e) {
-                U.error(log, "Message handling failed.", e);
-            }
-        }
-        else
-            throw new IllegalStateException("Unknown message type received to Hadoop shuffle [src=" + src +
-                ", msg=" + msg + ']');
-
-        return true;
-    }
-
-    /**
-     * @param taskCtx Task context.
-     * @return Output.
-     * @throws IgniteCheckedException If failed.
-     */
-    public HadoopTaskOutput output(HadoopTaskContext taskCtx) throws IgniteCheckedException {
-        return job(taskCtx.taskInfo().jobId()).output(taskCtx);
-    }
-
-    /**
-     * @param taskCtx Task context.
-     * @return Input.
-     * @throws IgniteCheckedException If failed.
-     */
-    public HadoopTaskInput input(HadoopTaskContext taskCtx) throws IgniteCheckedException {
-        return job(taskCtx.taskInfo().jobId()).input(taskCtx);
-    }
-
-    /**
-     * @param jobId Job ID.
-     */
-    public void jobFinished(HadoopJobId jobId) {
-        HadoopShuffleJob job = jobs.remove(jobId);
-
-        if (job != null) {
-            try {
-                job.close();
-            }
-            catch (IgniteCheckedException e) {
-                U.error(log, "Failed to close job: " + jobId, e);
-            }
-        }
-    }
-
-    /**
-     * Flushes all the outputs for the given job to remote nodes.
-     *
-     * @param jobId Job ID.
-     * @return Future.
-     */
-    public IgniteInternalFuture<?> flush(HadoopJobId jobId) {
-        HadoopShuffleJob job = jobs.get(jobId);
-
-        if (job == null)
-            return new GridFinishedFuture<>();
-
-        try {
-            return job.flush();
-        }
-        catch (IgniteCheckedException e) {
-            return new GridFinishedFuture<>(e);
-        }
-    }
-
-    /**
-     * @return Memory.
-     */
-    public GridUnsafeMemory memory() {
-        return mem;
-    }
-}
\ No newline at end of file

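The job() method above resolves shuffle jobs with a create-then-putIfAbsent pattern: the thread that loses the race closes its freshly created instance and adopts the winner's, so exactly one job survives per ID. A standalone sketch of the same pattern (generic names are illustrative, not part of this module):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    /** Illustrative registry: one value per key survives concurrent creation. */
    abstract class LazyCloseableRegistry<K, V extends AutoCloseable> {
        private final ConcurrentMap<K, V> map = new ConcurrentHashMap<>();

        /** Creates a fresh value for the given key. */
        protected abstract V create(K key) throws Exception;

        V getOrCreate(K key) throws Exception {
            V res = map.get(key);

            if (res == null) {
                res = create(key);

                V old = map.putIfAbsent(key, res);

                if (old != null) {
                    res.close(); // Lost the race: discard our instance, adopt the winner's.

                    res = old;
                }
            }

            return res;
        }
    }
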
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleAck.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleAck.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleAck.java
deleted file mode 100644
index 6013ec6..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleAck.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.shuffle;
-
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
-import org.apache.ignite.internal.util.tostring.GridToStringInclude;
-import org.apache.ignite.internal.util.typedef.internal.S;
-
-/**
- * Acknowledgement message.
- */
-public class HadoopShuffleAck implements HadoopMessage {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** */
-    @GridToStringInclude
-    private long msgId;
-
-    /** */
-    @GridToStringInclude
-    private HadoopJobId jobId;
-
-    /**
-     *
-     */
-    public HadoopShuffleAck() {
-        // No-op.
-    }
-
-    /**
-     * @param msgId Message ID.
-     * @param jobId Job ID.
-     */
-    public HadoopShuffleAck(long msgId, HadoopJobId jobId) {
-        assert jobId != null;
-
-        this.msgId = msgId;
-        this.jobId = jobId;
-    }
-
-    /**
-     * @return Message ID.
-     */
-    public long id() {
-        return msgId;
-    }
-
-    /**
-     * @return Job ID.
-     */
-    public HadoopJobId jobId() {
-        return jobId;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        jobId.writeExternal(out);
-        out.writeLong(msgId);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        jobId = new HadoopJobId();
-
-        jobId.readExternal(in);
-        msgId = in.readLong();
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopShuffleAck.class, this);
-    }
-}
\ No newline at end of file

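Since the wire format is just the externalized job ID followed by the long message ID, a serialization round trip is easy to sanity-check. A test-style sketch (method name hypothetical; plain JDK object streams are used):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;

    /** Illustrative round trip through writeExternal()/readExternal(). */
    static HadoopShuffleAck roundTrip(HadoopShuffleAck ack) throws Exception {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();

        try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
            ack.writeExternal(out);
        }

        HadoopShuffleAck res = new HadoopShuffleAck();

        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
            res.readExternal(in);
        }

        return res; // res.id() == ack.id() and res.jobId() equals ack.jobId().
    }
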
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleJob.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleJob.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleJob.java
deleted file mode 100644
index b940c72..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleJob.java
+++ /dev/null
@@ -1,612 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.shuffle;
-
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.atomic.AtomicReferenceArray;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteLogger;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.IgniteInterruptedCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopJob;
-import org.apache.ignite.internal.processors.hadoop.HadoopPartitioner;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskOutput;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskType;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopPerformanceCounter;
-import org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopConcurrentHashMultimap;
-import org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopMultimap;
-import org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopSkipList;
-import org.apache.ignite.internal.util.GridUnsafe;
-import org.apache.ignite.internal.util.future.GridCompoundFuture;
-import org.apache.ignite.internal.util.future.GridFinishedFuture;
-import org.apache.ignite.internal.util.future.GridFutureAdapter;
-import org.apache.ignite.internal.util.io.GridUnsafeDataInput;
-import org.apache.ignite.internal.util.lang.GridClosureException;
-import org.apache.ignite.internal.util.lang.IgniteInClosure2X;
-import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.internal.util.worker.GridWorker;
-import org.apache.ignite.lang.IgniteBiTuple;
-import org.apache.ignite.lang.IgniteInClosure;
-import org.apache.ignite.thread.IgniteThread;
-
-import static org.apache.ignite.internal.processors.hadoop.HadoopJobProperty.PARTITION_HASHMAP_SIZE;
-import static org.apache.ignite.internal.processors.hadoop.HadoopJobProperty.SHUFFLE_REDUCER_NO_SORTING;
-import static org.apache.ignite.internal.processors.hadoop.HadoopJobProperty.get;
-
-/**
- * Shuffle job.
- */
-public class HadoopShuffleJob<T> implements AutoCloseable {
-    /** */
-    private static final int MSG_BUF_SIZE = 128 * 1024;
-
-    /** */
-    private final HadoopJob job;
-
-    /** */
-    private final GridUnsafeMemory mem;
-
-    /** */
-    private final boolean needPartitioner;
-
-    /** Collection of task contexts for each reduce task. */
-    private final Map<Integer, HadoopTaskContext> reducersCtx = new HashMap<>();
-
-    /** Reducer addresses. */
-    private T[] reduceAddrs;
-
-    /** Local reducer address. */
-    private final T locReduceAddr;
-
-    /** */
-    private final HadoopShuffleMessage[] msgs;
-
-    /** */
-    private final AtomicReferenceArray<HadoopMultimap> maps;
-
-    /** */
-    private volatile IgniteInClosure2X<T, HadoopShuffleMessage> io;
-
-    /** */
-    protected ConcurrentMap<Long, IgniteBiTuple<HadoopShuffleMessage, GridFutureAdapter<?>>> sentMsgs =
-        new ConcurrentHashMap<>();
-
-    /** */
-    private volatile GridWorker snd;
-
-    /** Latch released once the sending IO closure is initialized. */
-    private final CountDownLatch ioInitLatch = new CountDownLatch(1);
-
-    /** Finished flag. Set on flush or close. */
-    private volatile boolean flushed;
-
-    /** */
-    private final IgniteLogger log;
-
-    /**
-     * @param locReduceAddr Local reducer address.
-     * @param log Logger.
-     * @param job Job.
-     * @param mem Memory.
-     * @param totalReducerCnt Total number of reducers in the job.
-     * @param locReducers Reducers that will run on the current node.
-     * @throws IgniteCheckedException If failed.
-     */
-    public HadoopShuffleJob(T locReduceAddr, IgniteLogger log, HadoopJob job, GridUnsafeMemory mem,
-        int totalReducerCnt, int[] locReducers) throws IgniteCheckedException {
-        this.locReduceAddr = locReduceAddr;
-        this.job = job;
-        this.mem = mem;
-        this.log = log.getLogger(HadoopShuffleJob.class);
-
-        if (!F.isEmpty(locReducers)) {
-            for (int rdc : locReducers) {
-                HadoopTaskInfo taskInfo = new HadoopTaskInfo(HadoopTaskType.REDUCE, job.id(), rdc, 0, null);
-
-                reducersCtx.put(rdc, job.getTaskContext(taskInfo));
-            }
-        }
-
-        needPartitioner = totalReducerCnt > 1;
-
-        maps = new AtomicReferenceArray<>(totalReducerCnt);
-        msgs = new HadoopShuffleMessage[totalReducerCnt];
-    }
-
-    /**
-     * @param reduceAddrs Addresses of reducers.
-     * @return {@code True} if addresses were initialized by this call.
-     */
-    public boolean initializeReduceAddresses(T[] reduceAddrs) {
-        if (this.reduceAddrs == null) {
-            this.reduceAddrs = reduceAddrs;
-
-            return true;
-        }
-
-        return false;
-    }
-
-    /**
-     * @return {@code True} if reducers addresses were initialized.
-     */
-    public boolean reducersInitialized() {
-        return reduceAddrs != null;
-    }
-
-    /**
-     * @param gridName Grid name.
-     * @param io IO closure for sending messages.
-     */
-    @SuppressWarnings("BusyWait")
-    public void startSending(String gridName, IgniteInClosure2X<T, HadoopShuffleMessage> io) {
-        assert snd == null;
-        assert io != null;
-
-        this.io = io;
-
-        if (!flushed) {
-            snd = new GridWorker(gridName, "hadoop-shuffle-" + job.id(), log) {
-                @Override protected void body() throws InterruptedException {
-                    try {
-                        while (!isCancelled()) {
-                            Thread.sleep(5);
-
-                            collectUpdatesAndSend(false);
-                        }
-                    }
-                    catch (IgniteCheckedException e) {
-                        throw new IllegalStateException(e);
-                    }
-                }
-            };
-
-            new IgniteThread(snd).start();
-        }
-
-        ioInitLatch.countDown();
-    }
-
-    /**
-     * @param maps Maps.
-     * @param idx Index.
-     * @return Map.
-     */
-    private HadoopMultimap getOrCreateMap(AtomicReferenceArray<HadoopMultimap> maps, int idx) {
-        HadoopMultimap map = maps.get(idx);
-
-        if (map == null) { // Create new map.
-            map = get(job.info(), SHUFFLE_REDUCER_NO_SORTING, false) ?
-                new HadoopConcurrentHashMultimap(job.info(), mem, get(job.info(), PARTITION_HASHMAP_SIZE, 8 * 1024)) :
-                new HadoopSkipList(job.info(), mem);
-
-            if (!maps.compareAndSet(idx, null, map)) {
-                map.close();
-
-                return maps.get(idx);
-            }
-        }
-
-        return map;
-    }
-
-    /**
-     * @param msg Message.
-     * @throws IgniteCheckedException If failed.
-     */
-    public void onShuffleMessage(HadoopShuffleMessage msg) throws IgniteCheckedException {
-        assert msg.buffer() != null;
-        assert msg.offset() > 0;
-
-        HadoopTaskContext taskCtx = reducersCtx.get(msg.reducer());
-
-        HadoopPerformanceCounter perfCntr = HadoopPerformanceCounter.getCounter(taskCtx.counters(), null);
-
-        perfCntr.onShuffleMessage(msg.reducer(), U.currentTimeMillis());
-
-        HadoopMultimap map = getOrCreateMap(maps, msg.reducer());
-
-        // Add data from message to the map.
-        try (HadoopMultimap.Adder adder = map.startAdding(taskCtx)) {
-            final GridUnsafeDataInput dataInput = new GridUnsafeDataInput();
-            final UnsafeValue val = new UnsafeValue(msg.buffer());
-
-            msg.visit(new HadoopShuffleMessage.Visitor() {
-                /** */
-                private HadoopMultimap.Key key;
-
-                @Override public void onKey(byte[] buf, int off, int len) throws IgniteCheckedException {
-                    dataInput.bytes(buf, off, off + len);
-
-                    key = adder.addKey(dataInput, key);
-                }
-
-                @Override public void onValue(byte[] buf, int off, int len) {
-                    val.off = off;
-                    val.size = len;
-
-                    key.add(val);
-                }
-            });
-        }
-    }
-
-    /**
-     * @param ack Shuffle ack.
-     */
-    @SuppressWarnings("ConstantConditions")
-    public void onShuffleAck(HadoopShuffleAck ack) {
-        IgniteBiTuple<HadoopShuffleMessage, GridFutureAdapter<?>> tup = sentMsgs.get(ack.id());
-
-        if (tup != null)
-            tup.get2().onDone();
-        else
-            log.warning("Received shuffle ack for not registered shuffle id: " + ack);
-    }
-
-    /**
-     * Unsafe value.
-     */
-    private static class UnsafeValue implements HadoopMultimap.Value {
-        /** */
-        private final byte[] buf;
-
-        /** */
-        private int off;
-
-        /** */
-        private int size;
-
-        /**
-         * @param buf Buffer.
-         */
-        private UnsafeValue(byte[] buf) {
-            assert buf != null;
-
-            this.buf = buf;
-        }
-
-        /** */
-        @Override public int size() {
-            return size;
-        }
-
-        /** */
-        @Override public void copyTo(long ptr) {
-            GridUnsafe.copyMemory(buf, GridUnsafe.BYTE_ARR_OFF + off, null, ptr, size);
-        }
-    }
-
-    /**
-     * Sends map updates to remote reducers.
-     */
-    private void collectUpdatesAndSend(boolean flush) throws IgniteCheckedException {
-        for (int i = 0; i < maps.length(); i++) {
-            HadoopMultimap map = maps.get(i);
-
-            if (map == null || locReduceAddr.equals(reduceAddrs[i]))
-                continue; // Skip empty map and local node.
-
-            if (msgs[i] == null)
-                msgs[i] = new HadoopShuffleMessage(job.id(), i, MSG_BUF_SIZE);
-
-            final int idx = i;
-
-            map.visit(false, new HadoopMultimap.Visitor() {
-                /** */
-                private long keyPtr;
-
-                /** */
-                private int keySize;
-
-                /** */
-                private boolean keyAdded;
-
-                /** {@inheritDoc} */
-                @Override public void onKey(long keyPtr, int keySize) {
-                    this.keyPtr = keyPtr;
-                    this.keySize = keySize;
-
-                    keyAdded = false;
-                }
-
-                private boolean tryAdd(long valPtr, int valSize) {
-                    HadoopShuffleMessage msg = msgs[idx];
-
-                    if (!keyAdded) { // Add key and value.
-                        int size = keySize + valSize;
-
-                        if (!msg.available(size, false))
-                            return false;
-
-                        msg.addKey(keyPtr, keySize);
-                        msg.addValue(valPtr, valSize);
-
-                        keyAdded = true;
-
-                        return true;
-                    }
-
-                    if (!msg.available(valSize, true))
-                        return false;
-
-                    msg.addValue(valPtr, valSize);
-
-                    return true;
-                }
-
-                /** {@inheritDoc} */
-                @Override public void onValue(long valPtr, int valSize) {
-                    if (tryAdd(valPtr, valSize))
-                        return;
-
-                    send(idx, keySize + valSize);
-
-                    keyAdded = false;
-
-                    if (!tryAdd(valPtr, valSize))
-                        throw new IllegalStateException();
-                }
-            });
-
-            if (flush && msgs[i].offset() != 0)
-                send(i, 0);
-        }
-    }
-
-    /**
-     * @param idx Index of message.
-     * @param newBufMinSize Min new buffer size.
-     */
-    private void send(final int idx, int newBufMinSize) {
-        final GridFutureAdapter<?> fut = new GridFutureAdapter<>();
-
-        HadoopShuffleMessage msg = msgs[idx];
-
-        final long msgId = msg.id();
-
-        IgniteBiTuple<HadoopShuffleMessage, GridFutureAdapter<?>> old = sentMsgs.putIfAbsent(msgId,
-            new IgniteBiTuple<HadoopShuffleMessage, GridFutureAdapter<?>>(msg, fut));
-
-        assert old == null;
-
-        try {
-            io.apply(reduceAddrs[idx], msg);
-        }
-        catch (GridClosureException e) {
-            fut.onDone(U.unwrap(e));
-        }
-
-        fut.listen(new IgniteInClosure<IgniteInternalFuture<?>>() {
-            @Override public void apply(IgniteInternalFuture<?> f) {
-                try {
-                    f.get();
-
-                    // Clean up the future from map only if there was no exception.
-                    // Otherwise flush() should fail.
-                    sentMsgs.remove(msgId);
-                }
-                catch (IgniteCheckedException e) {
-                    log.error("Failed to send message.", e);
-                }
-            }
-        });
-
-        msgs[idx] = newBufMinSize == 0 ? null : new HadoopShuffleMessage(job.id(), idx,
-            Math.max(MSG_BUF_SIZE, newBufMinSize));
-    }
-
-    /** {@inheritDoc} */
-    @Override public void close() throws IgniteCheckedException {
-        if (snd != null) {
-            snd.cancel();
-
-            try {
-                snd.join();
-            }
-            catch (InterruptedException e) {
-                throw new IgniteInterruptedCheckedException(e);
-            }
-        }
-
-        close(maps);
-    }
-
-    /**
-     * @param maps Maps.
-     */
-    private void close(AtomicReferenceArray<HadoopMultimap> maps) {
-        for (int i = 0; i < maps.length(); i++) {
-            HadoopMultimap map = maps.get(i);
-
-            if (map != null)
-                map.close();
-        }
-    }
-
-    /**
-     * @return Future.
-     */
-    @SuppressWarnings("unchecked")
-    public IgniteInternalFuture<?> flush() throws IgniteCheckedException {
-        if (log.isDebugEnabled())
-            log.debug("Flushing job " + job.id() + " on address " + locReduceAddr);
-
-        flushed = true;
-
-        if (maps.length() == 0)
-            return new GridFinishedFuture<>();
-
-        U.await(ioInitLatch);
-
-        GridWorker snd0 = snd;
-
-        if (snd0 != null) {
-            if (log.isDebugEnabled())
-                log.debug("Cancelling sender thread.");
-
-            snd0.cancel();
-
-            try {
-                snd0.join();
-
-                if (log.isDebugEnabled())
-                    log.debug("Finished waiting for sending thread to complete on shuffle job flush: " + job.id());
-            }
-            catch (InterruptedException e) {
-                throw new IgniteInterruptedCheckedException(e);
-            }
-        }
-
-        collectUpdatesAndSend(true); // With flush.
-
-        if (log.isDebugEnabled())
-            log.debug("Finished sending collected updates to remote reducers: " + job.id());
-
-        GridCompoundFuture fut = new GridCompoundFuture<>();
-
-        for (IgniteBiTuple<HadoopShuffleMessage, GridFutureAdapter<?>> tup : sentMsgs.values())
-            fut.add(tup.get2());
-
-        fut.markInitialized();
-
-        if (log.isDebugEnabled())
-            log.debug("Collected futures to compound futures for flush: " + sentMsgs.size());
-
-        return fut;
-    }
-
-    /**
-     * @param taskCtx Task context.
-     * @return Output.
-     * @throws IgniteCheckedException If failed.
-     */
-    public HadoopTaskOutput output(HadoopTaskContext taskCtx) throws IgniteCheckedException {
-        switch (taskCtx.taskInfo().type()) {
-            case MAP:
-                assert !job.info().hasCombiner() : "Output may be created directly only when no combiner is defined.";
-
-            case COMBINE:
-                return new PartitionedOutput(taskCtx);
-
-            default:
-                throw new IllegalStateException("Illegal type: " + taskCtx.taskInfo().type());
-        }
-    }
-
-    /**
-     * @param taskCtx Task context.
-     * @return Input.
-     * @throws IgniteCheckedException If failed.
-     */
-    @SuppressWarnings("unchecked")
-    public HadoopTaskInput input(HadoopTaskContext taskCtx) throws IgniteCheckedException {
-        switch (taskCtx.taskInfo().type()) {
-            case REDUCE:
-                int reducer = taskCtx.taskInfo().taskNumber();
-
-                HadoopMultimap m = maps.get(reducer);
-
-                if (m != null)
-                    return m.input(taskCtx);
-
-                return new HadoopTaskInput() { // Empty input.
-                    @Override public boolean next() {
-                        return false;
-                    }
-
-                    @Override public Object key() {
-                        throw new IllegalStateException();
-                    }
-
-                    @Override public Iterator<?> values() {
-                        throw new IllegalStateException();
-                    }
-
-                    @Override public void close() {
-                        // No-op.
-                    }
-                };
-
-            default:
-                throw new IllegalStateException("Illegal type: " + taskCtx.taskInfo().type());
-        }
-    }
-
-    /**
-     * Partitioned output.
-     */
-    private class PartitionedOutput implements HadoopTaskOutput {
-        /** */
-        private final HadoopTaskOutput[] adders = new HadoopTaskOutput[maps.length()];
-
-        /** */
-        private HadoopPartitioner partitioner;
-
-        /** */
-        private final HadoopTaskContext taskCtx;
-
-        /**
-         * Constructor.
-         *
-         * @param taskCtx Task context.
-         */
-        private PartitionedOutput(HadoopTaskContext taskCtx) throws IgniteCheckedException {
-            this.taskCtx = taskCtx;
-
-            if (needPartitioner)
-                partitioner = taskCtx.partitioner();
-        }
-
-        /** {@inheritDoc} */
-        @Override public void write(Object key, Object val) throws IgniteCheckedException {
-            int part = 0;
-
-            if (partitioner != null) {
-                part = partitioner.partition(key, val, adders.length);
-
-                if (part < 0 || part >= adders.length)
-                    throw new IgniteCheckedException("Invalid partition: " + part);
-            }
-
-            HadoopTaskOutput out = adders[part];
-
-            if (out == null)
-                adders[part] = out = getOrCreateMap(maps, part).startAdding(taskCtx);
-
-            out.write(key, val);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void close() throws IgniteCheckedException {
-            for (HadoopTaskOutput adder : adders) {
-                if (adder != null)
-                    adder.close();
-            }
-        }
-    }
-}
\ No newline at end of file

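The onValue() callback in collectUpdatesAndSend() above follows a simple batching rule: the key is written once per message, values are appended while they fit, and when a value no longer fits the message is sent and the key is re-added to the fresh buffer. A standalone sketch of that rule, tracking byte sizes only (names are illustrative):

    /** Illustrative batcher mirroring the tryAdd()/send() logic above. */
    final class KeyValueBatcher {
        private final int cap;    // Buffer capacity, like MSG_BUF_SIZE above.
        private int used;         // Bytes consumed so far.
        private boolean keyAdded; // Whether the current key is already in this batch.

        KeyValueBatcher(int cap) {
            this.cap = cap;
        }

        /** @return {@code false} if the data does not fit and the batch must be sent first. */
        boolean tryAdd(int keySize, int valSize) {
            // The first value for a key must be preceded by the key itself.
            int need = keyAdded ? valSize : keySize + valSize;

            if (used + need > cap)
                return false;

            used += need;
            keyAdded = true;

            return true;
        }

        /** Called after the batch is sent: start over, the key must be re-added. */
        void onSent() {
            used = 0;
            keyAdded = false;
        }
    }
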
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleMessage.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleMessage.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleMessage.java
deleted file mode 100644
index 69dfe64..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleMessage.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.shuffle;
-
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.concurrent.atomic.AtomicLong;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
-import org.apache.ignite.internal.util.GridUnsafe;
-import org.apache.ignite.internal.util.tostring.GridToStringInclude;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-/**
- * Shuffle message.
- */
-public class HadoopShuffleMessage implements HadoopMessage {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** */
-    private static final AtomicLong ids = new AtomicLong();
-
-    /** */
-    private static final byte MARKER_KEY = (byte)17;
-
-    /** */
-    private static final byte MARKER_VALUE = (byte)31;
-
-    /** */
-    @GridToStringInclude
-    private long msgId;
-
-    /** */
-    @GridToStringInclude
-    private HadoopJobId jobId;
-
-    /** */
-    @GridToStringInclude
-    private int reducer;
-
-    /** */
-    private byte[] buf;
-
-    /** */
-    @GridToStringInclude
-    private int off;
-
-    /**
-     *
-     */
-    public HadoopShuffleMessage() {
-        // No-op.
-    }
-
-    /**
-     * @param jobId Job ID.
-     * @param reducer Reducer index.
-     * @param size Buffer size.
-     */
-    public HadoopShuffleMessage(HadoopJobId jobId, int reducer, int size) {
-        assert jobId != null;
-
-        buf = new byte[size];
-
-        this.jobId = jobId;
-        this.reducer = reducer;
-
-        msgId = ids.incrementAndGet();
-    }
-
-    /**
-     * @return Message ID.
-     */
-    public long id() {
-        return msgId;
-    }
-
-    /**
-     * @return Job ID.
-     */
-    public HadoopJobId jobId() {
-        return jobId;
-    }
-
-    /**
-     * @return Reducer.
-     */
-    public int reducer() {
-        return reducer;
-    }
-
-    /**
-     * @return Buffer.
-     */
-    public byte[] buffer() {
-        return buf;
-    }
-
-    /**
-     * @return Offset.
-     */
-    public int offset() {
-        return off;
-    }
-
-    /**
-     * @param size Size.
-     * @param valOnly Whether only a value (no key) will be added.
-     * @return {@code true} if this message can fit additional data of the given size.
-     */
-    public boolean available(int size, boolean valOnly) {
-        size += valOnly ? 5 : 10;
-
-        if (off + size > buf.length) {
-            if (off == 0) { // Resize if requested size is too big.
-                buf = new byte[size];
-
-                return true;
-            }
-
-            return false;
-        }
-
-        return true;
-    }
-
-    /**
-     * @param keyPtr Key pointer.
-     * @param keySize Key size.
-     */
-    public void addKey(long keyPtr, int keySize) {
-        add(MARKER_KEY, keyPtr, keySize);
-    }
-
-    /**
-     * @param valPtr Value pointer.
-     * @param valSize Value size.
-     */
-    public void addValue(long valPtr, int valSize) {
-        add(MARKER_VALUE, valPtr, valSize);
-    }
-
-    /**
-     * @param marker Marker.
-     * @param ptr Pointer.
-     * @param size Size.
-     */
-    private void add(byte marker, long ptr, int size) {
-        buf[off++] = marker;
-
-        GridUnsafe.putInt(buf, GridUnsafe.BYTE_ARR_OFF + off, size);
-
-        off += 4;
-
-        GridUnsafe.copyMemory(null, ptr, buf, GridUnsafe.BYTE_ARR_OFF + off, size);
-
-        off += size;
-    }
-
-    /**
-     * @param v Visitor.
-     */
-    public void visit(Visitor v) throws IgniteCheckedException {
-        for (int i = 0; i < off;) {
-            byte marker = buf[i++];
-
-            int size = GridUnsafe.getInt(buf, GridUnsafe.BYTE_ARR_OFF + i);
-
-            i += 4;
-
-            if (marker == MARKER_VALUE)
-                v.onValue(buf, i, size);
-            else if (marker == MARKER_KEY)
-                v.onKey(buf, i, size);
-            else
-                throw new IllegalStateException();
-
-            i += size;
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        jobId.writeExternal(out);
-        out.writeLong(msgId);
-        out.writeInt(reducer);
-        out.writeInt(off);
-        U.writeByteArray(out, buf);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        jobId = new HadoopJobId();
-
-        jobId.readExternal(in);
-        msgId = in.readLong();
-        reducer = in.readInt();
-        off = in.readInt();
-        buf = U.readByteArray(in);
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopShuffleMessage.class, this);
-    }
-
-    /**
-     * Visitor.
-     */
-    public static interface Visitor {
-        /**
-         * @param buf Buffer.
-         * @param off Offset.
-         * @param len Length.
-         */
-        public void onKey(byte[] buf, int off, int len) throws IgniteCheckedException;
-
-        /**
-         * @param buf Buffer.
-         * @param off Offset.
-         * @param len Length.
-         */
-        public void onValue(byte[] buf, int off, int len) throws IgniteCheckedException;
-    }
-}
\ No newline at end of file

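The buffer written by add() and consumed by visit() above uses a flat framing: a one-byte marker (17 for a key, 31 for a value), a four-byte length and the payload. A standalone parsing sketch over a plain byte array (method name hypothetical; native byte order is assumed because GridUnsafe writes the length that way):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    /** Illustrative parser for the frame layout used by HadoopShuffleMessage. */
    static void visitFrames(byte[] buf, int off) {
        ByteBuffer bb = ByteBuffer.wrap(buf, 0, off).order(ByteOrder.nativeOrder());

        while (bb.hasRemaining()) {
            byte marker = bb.get();  // MARKER_KEY (17) or MARKER_VALUE (31).
            int size = bb.getInt();  // Payload length.

            byte[] payload = new byte[size];
            bb.get(payload);

            if (marker == 17)
                System.out.println("key: " + size + " bytes");
            else if (marker == 31)
                System.out.println("value: " + size + " bytes");
            else
                throw new IllegalStateException("Unknown marker: " + marker);
        }
    }
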
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopConcurrentHashMultimap.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopConcurrentHashMultimap.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopConcurrentHashMultimap.java
deleted file mode 100644
index ffa7871..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopConcurrentHashMultimap.java
+++ /dev/null
@@ -1,616 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.shuffle.collections;
-
-import java.io.DataInput;
-import java.util.Random;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLongArray;
-import java.util.concurrent.atomic.AtomicReference;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
-import org.apache.ignite.internal.util.GridLongList;
-import org.apache.ignite.internal.util.GridRandom;
-import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
-import org.apache.ignite.internal.util.typedef.internal.A;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Multimap for map reduce intermediate results.
- */
-public class HadoopConcurrentHashMultimap extends HadoopHashMultimapBase {
-    /** */
-    private final AtomicReference<State> state = new AtomicReference<>(State.READING_WRITING);
-
-    /** */
-    private volatile AtomicLongArray oldTbl;
-
-    /** */
-    private volatile AtomicLongArray newTbl;
-
-    /** */
-    private final AtomicInteger keys = new AtomicInteger();
-
-    /** */
-    private final CopyOnWriteArrayList<AdderImpl> adders = new CopyOnWriteArrayList<>();
-
-    /** */
-    private final AtomicInteger inputs = new AtomicInteger();
-
-    /**
-     * @param jobInfo Job info.
-     * @param mem Memory.
-     * @param cap Initial capacity.
-     */
-    public HadoopConcurrentHashMultimap(HadoopJobInfo jobInfo, GridUnsafeMemory mem, int cap) {
-        super(jobInfo, mem);
-
-        assert U.isPow2(cap);
-
-        newTbl = oldTbl = new AtomicLongArray(cap);
-    }
-
-    /**
-     * @return Number of keys.
-     */
-    public long keys() {
-        int res = keys.get();
-
-        for (AdderImpl adder : adders)
-            res += adder.locKeys.get();
-
-        return res;
-    }
-
-    /**
-     * @return Current table capacity.
-     */
-    @Override public int capacity() {
-        return oldTbl.length();
-    }
-
-    /**
-     * @param ctx Task context.
-     * @return Adder object.
-     * @throws IgniteCheckedException If failed.
-     */
-    @Override public Adder startAdding(HadoopTaskContext ctx) throws IgniteCheckedException {
-        if (inputs.get() != 0)
-            throw new IllegalStateException("Active inputs.");
-
-        if (state.get() == State.CLOSING)
-            throw new IllegalStateException("Closed.");
-
-        return new AdderImpl(ctx);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void close() {
-        assert inputs.get() == 0 : inputs.get();
-        assert adders.isEmpty() : adders.size();
-
-        state(State.READING_WRITING, State.CLOSING);
-
-        if (keys() == 0)
-            return;
-
-        super.close();
-    }
-
-    /** {@inheritDoc} */
-    @Override protected long meta(int idx) {
-        return oldTbl.get(idx);
-    }
-
-    /**
-     * Incrementally visits all the keys and values in the map.
-     *
-     * @param ignoreLastVisited Flag indicating that visiting must be started from the beginning.
-     * @param v Visitor.
-     * @return {@code false} If visiting was impossible due to rehashing.
-     */
-    @Override public boolean visit(boolean ignoreLastVisited, Visitor v) throws IgniteCheckedException {
-        if (!state.compareAndSet(State.READING_WRITING, State.VISITING)) {
-            assert state.get() != State.CLOSING;
-
-            return false; // Cannot visit while rehashing is in progress.
-        }
-
-        AtomicLongArray tbl0 = oldTbl;
-
-        for (int i = 0; i < tbl0.length(); i++) {
-            long meta = tbl0.get(i);
-
-            while (meta != 0) {
-                long valPtr = value(meta);
-
-                long lastVisited = ignoreLastVisited ? 0 : lastVisitedValue(meta);
-
-                if (valPtr != lastVisited) {
-                    v.onKey(key(meta), keySize(meta));
-
-                    lastVisitedValue(meta, valPtr); // Set it to the first value in chain.
-
-                    do {
-                        v.onValue(valPtr + 12, valueSize(valPtr));
-
-                        valPtr = nextValue(valPtr);
-                    }
-                    while (valPtr != lastVisited);
-                }
-
-                meta = collision(meta);
-            }
-        }
-
-        state(State.VISITING, State.READING_WRITING);
-
-        return true;
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopTaskInput input(HadoopTaskContext taskCtx) throws IgniteCheckedException {
-        inputs.incrementAndGet();
-
-        if (!adders.isEmpty())
-            throw new IllegalStateException("Active adders.");
-
-        State s = state.get();
-
-        if (s == State.CLOSING)
-            throw new IllegalStateException("Closed.");
-
-        assert s != State.REHASHING;
-
-        return new Input(taskCtx) {
-            @Override public void close() throws IgniteCheckedException {
-                if (inputs.decrementAndGet() < 0)
-                    throw new IllegalStateException();
-
-                super.close();
-            }
-        };
-    }
-
-    /**
-     * @param fromTbl Table.
-     */
-    private void rehashIfNeeded(AtomicLongArray fromTbl) {
-        if (fromTbl.length() == Integer.MAX_VALUE)
-            return;
-
-        long keys0 = keys();
-
-        if (keys0 < 3 * (fromTbl.length() >>> 2)) // Rehash only when the key count reaches 3/4 of capacity.
-            return;
-
-        if (fromTbl != newTbl) // Check if someone else has already done the job.
-            return;
-
-        if (!state.compareAndSet(State.READING_WRITING, State.REHASHING)) {
-            assert state.get() != State.CLOSING; // Visiting is allowed, but we will not rehash.
-
-            return;
-        }
-
-        if (fromTbl != newTbl) { // Double check.
-            state(State.REHASHING, State.READING_WRITING); // Switch back.
-
-            return;
-        }
-
-        // Calculate new table capacity.
-        int newLen = fromTbl.length();
-
-        do {
-            newLen <<= 1;
-        }
-        while (newLen < keys0);
-
-        if (keys0 >= 3 * (newLen >>> 2)) // Still more than 3/4.
-            newLen <<= 1;
-
-        // This is our target table for rehashing.
-        AtomicLongArray toTbl = new AtomicLongArray(newLen);
-
-        // Make the new table visible before rehashing.
-        newTbl = toTbl;
-
-        // Rehash.
-        int newMask = newLen - 1;
-
-        long failedMeta = 0;
-
-        GridLongList collisions = new GridLongList(16);
-
-        for (int i = 0; i < fromTbl.length(); i++) { // Scan source table.
-            long meta = fromTbl.get(i);
-
-            assert meta != -1;
-
-            if (meta == 0) { // No entry.
-                failedMeta = 0;
-
-                if (!fromTbl.compareAndSet(i, 0, -1)) // Mark as moved.
-                    i--; // Retry.
-
-                continue;
-            }
-
-            do { // Collect the collision chain up to the previously failed meta (or 0).
-                collisions.add(meta);
-
-                meta = collision(meta);
-            }
-            while (meta != failedMeta);
-
-            do { // Go from the last to the first to avoid 'in-flight' state for meta entries.
-                meta = collisions.remove();
-
-                int addr = keyHash(meta) & newMask;
-
-                for (;;) { // Move meta entry to the new table.
-                    long toCollision = toTbl.get(addr);
-
-                    collision(meta, toCollision);
-
-                    if (toTbl.compareAndSet(addr, toCollision, meta))
-                        break;
-                }
-            }
-            while (!collisions.isEmpty());
-
-            // Here 'meta' will be a root pointer in old table.
-            if (!fromTbl.compareAndSet(i, meta, -1)) { // Try to mark as moved.
-                failedMeta = meta;
-
-                i--; // Retry the same address in table because new keys were added.
-            }
-            else
-                failedMeta = 0;
-        }
-
-        // Now old and new tables will be the same again.
-        oldTbl = toTbl;
-
-        state(State.REHASHING, State.READING_WRITING);
-    }
-
-    /**
-     * Switch state.
-     *
-     * @param oldState Expected state.
-     * @param newState New state.
-     */
-    private void state(State oldState, State newState) {
-        if (!state.compareAndSet(oldState, newState))
-            throw new IllegalStateException();
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @return Value pointer.
-     */
-    @Override protected long value(long meta) {
-        return mem.readLongVolatile(meta + 16);
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @param oldValPtr Old value.
-     * @param newValPtr New value.
-     * @return {@code true} If succeeded.
-     */
-    private boolean casValue(long meta, long oldValPtr, long newValPtr) {
-        return mem.casLong(meta + 16, oldValPtr, newValPtr);
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @return Collision pointer.
-     */
-    @Override protected long collision(long meta) {
-        return mem.readLongVolatile(meta + 24);
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @param collision Collision pointer.
-     */
-    @Override protected void collision(long meta, long collision) {
-        assert meta != collision : meta;
-
-        mem.writeLongVolatile(meta + 24, collision);
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @return Last visited value pointer.
-     */
-    private long lastVisitedValue(long meta) {
-        return mem.readLong(meta + 32);
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @param valPtr Last visited value pointer.
-     */
-    private void lastVisitedValue(long meta, long valPtr) {
-        mem.writeLong(meta + 32, valPtr);
-    }
-
-    /**
-     * Adder. Must not be shared between threads.
-     */
-    private class AdderImpl extends AdderBase {
-        /** */
-        private final Reader keyReader;
-
-        /** */
-        private final AtomicInteger locKeys = new AtomicInteger();
-
-        /** */
-        private final Random rnd = new GridRandom();
-
-        /**
-         * @param ctx Task context.
-         * @throws IgniteCheckedException If failed.
-         */
-        private AdderImpl(HadoopTaskContext ctx) throws IgniteCheckedException {
-            super(ctx);
-
-            keyReader = new Reader(keySer);
-
-            rehashIfNeeded(oldTbl);
-
-            adders.add(this);
-        }
-
-        /**
-         * @param in Data input.
-         * @param reuse Reusable key.
-         * @return Key.
-         * @throws IgniteCheckedException If failed.
-         */
-        @Override public Key addKey(DataInput in, @Nullable Key reuse) throws IgniteCheckedException {
-            KeyImpl k = reuse == null ? new KeyImpl() : (KeyImpl)reuse;
-
-            k.tmpKey = keySer.read(in, k.tmpKey);
-
-            k.meta = add(k.tmpKey, null);
-
-            return k;
-        }
-
-        /** {@inheritDoc} */
-        @Override public void write(Object key, Object val) throws IgniteCheckedException {
-            A.notNull(val, "val");
-
-            add(key, val);
-        }
-
-        /**
-         * @param tbl Table.
-         */
-        private void incrementKeys(AtomicLongArray tbl) {
-            locKeys.lazySet(locKeys.get() + 1);
-
-            if (rnd.nextInt(tbl.length()) < 512)
-                rehashIfNeeded(tbl);
-        }
-
-        /**
-         * @param keyHash Key hash.
-         * @param keySize Key size.
-         * @param keyPtr Key pointer.
-         * @param valPtr Value page pointer.
-         * @param collisionPtr Pointer to meta with hash collision.
-         * @param lastVisitedVal Last visited value pointer.
-         * @return Created meta page pointer.
-         */
-        private long createMeta(int keyHash, int keySize, long keyPtr, long valPtr, long collisionPtr, long lastVisitedVal) {
-            long meta = allocate(40);
-
-            mem.writeInt(meta, keyHash);
-            mem.writeInt(meta + 4, keySize);
-            mem.writeLong(meta + 8, keyPtr);
-            mem.writeLong(meta + 16, valPtr);
-            mem.writeLong(meta + 24, collisionPtr);
-            mem.writeLong(meta + 32, lastVisitedVal);
-
-            return meta;
-        }
-
-        /**
-         * @param key Key.
-         * @param val Value.
-         * @return Updated or created meta page pointer.
-         * @throws IgniteCheckedException If failed.
-         */
-        private long add(Object key, @Nullable Object val) throws IgniteCheckedException {
-            AtomicLongArray tbl = oldTbl;
-
-            int keyHash = U.hash(key.hashCode());
-
-            long newMetaPtr = 0;
-
-            long valPtr = 0;
-
-            if (val != null) {
-                valPtr = write(12, val, valSer);
-                int valSize = writtenSize() - 12;
-
-                valueSize(valPtr, valSize);
-            }
-
-            for (AtomicLongArray old = null;;) {
-                int addr = keyHash & (tbl.length() - 1);
-
-                long metaPtrRoot = tbl.get(addr); // Read root meta pointer at this address.
-
-                if (metaPtrRoot == -1) { // The cell was already moved by rehashing.
-                    AtomicLongArray n = newTbl; // Need to read newTbl first here.
-                    AtomicLongArray o = oldTbl;
-
-                    tbl = tbl == o ? n : o; // Try to get the oldest table that is still newer than ours.
-
-                    old = null;
-
-                    continue;
-                }
-
-                if (metaPtrRoot != 0) { // Non-empty slot.
-                    long metaPtr = metaPtrRoot;
-
-                    do { // Scan all the collisions.
-                        if (keyHash(metaPtr) == keyHash && key.equals(keyReader.readKey(metaPtr))) { // Found key.
-                            if (newMetaPtr != 0)  // Deallocate new meta if one was allocated.
-                                localDeallocate(key(newMetaPtr)); // Key was allocated first, so rewind to its pointer.
-
-                            if (valPtr != 0) { // Add value if it exists.
-                                long nextValPtr;
-
-                                // Values are linked to each other into a stack-like structure.
-                                // Replace the last value in meta with ours and link it as next.
-                                do {
-                                    nextValPtr = value(metaPtr);
-
-                                    nextValue(valPtr, nextValPtr);
-                                }
-                                while (!casValue(metaPtr, nextValPtr, valPtr));
-                            }
-
-                            return metaPtr;
-                        }
-
-                        metaPtr = collision(metaPtr);
-                    }
-                    while (metaPtr != 0);
-
-                    // We did not find our key here; check whether rehashing moved it to the new table.
-                    if (old == null) { // If the old table is already set, we will just try to update it.
-                        AtomicLongArray n = newTbl;
-
-                        if (n != tbl) { // Rehashing is in progress: look for the key in the new table but remember the old one.
-                            old = tbl;
-                            tbl = n;
-
-                            continue;
-                        }
-                    }
-                }
-
-                if (old != null) { // We checked the new table as well as the old one and did not find our key.
-                    tbl = old; // Try to add new key to the old table.
-
-                    addr = keyHash & (tbl.length() - 1);
-
-                    old = null;
-                }
-
-                if (newMetaPtr == 0) { // Allocate new meta page.
-                    long keyPtr = write(0, key, keySer);
-                    int keySize = writtenSize();
-
-                    if (valPtr != 0)
-                        nextValue(valPtr, 0);
-
-                    newMetaPtr = createMeta(keyHash, keySize, keyPtr, valPtr, metaPtrRoot, 0);
-                }
-                else // Update the new meta's collision pointer with the current root.
-                    collision(newMetaPtr, metaPtrRoot);
-
-                if (tbl.compareAndSet(addr, metaPtrRoot, newMetaPtr)) { // Try to replace root pointer with new one.
-                    incrementKeys(tbl);
-
-                    return newMetaPtr;
-                }
-            }
-        }
-
-        /** {@inheritDoc} */
-        @Override public void close() throws IgniteCheckedException {
-            if (!adders.remove(this))
-                throw new IllegalStateException();
-
-            keys.addAndGet(locKeys.get()); // There is a race here and the #keys() method can return a wrong result, but that is ok.
-
-            super.close();
-        }
-
-        /**
-         * Key.
-         */
-        private class KeyImpl implements Key {
-            /** */
-            private long meta;
-
-            /** */
-            private Object tmpKey;
-
-            /**
-             * @return Meta pointer for the key.
-             */
-            public long address() {
-                return meta;
-            }
-
-            /**
-             * @param val Value.
-             */
-            @Override public void add(Value val) {
-                int size = val.size();
-
-                long valPtr = allocate(size + 12);
-
-                val.copyTo(valPtr + 12);
-
-                valueSize(valPtr, size);
-
-                long nextVal;
-
-                do {
-                    nextVal = value(meta);
-
-                    nextValue(valPtr, nextVal);
-                }
-                while (!casValue(meta, nextVal, valPtr));
-            }
-        }
-    }
-
-    /**
-     * Current map state.
-     */
-    private enum State {
-        /** */
-        REHASHING,
-
-        /** */
-        VISITING,
-
-        /** */
-        READING_WRITING,
-
-        /** */
-        CLOSING
-    }
-}
\ No newline at end of file

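The rehashing loop above relies on a sentinel value: once a bucket's collision chain has
been relinked into the new table, the old cell is CAS-ed to -1, so a writer that reads -1
knows the cell has moved and retries against newTbl. The following is a minimal,
self-contained sketch of that bucket-sealing protocol; the names SentinelRehash and
moveBucket are illustrative only and do not appear in the Ignite sources above.

    import java.util.concurrent.atomic.AtomicLongArray;

    /** Toy model of the "-1 means moved" bucket-sealing protocol. */
    public class SentinelRehash {
        static final long MOVED = -1L;

        /** Seals the chain head at index i of src once its entries are relinked into dst. */
        static void moveBucket(AtomicLongArray src, AtomicLongArray dst, int i) {
            for (;;) {
                long head = src.get(i);

                if (head == 0) { // Empty cell: just seal it.
                    if (src.compareAndSet(i, 0, MOVED))
                        return;

                    continue; // A writer installed a new head, re-read it.
                }

                // The real code relinks every meta entry of the chain into dst here (omitted).

                if (src.compareAndSet(i, head, MOVED))
                    return; // Sealed: writers that observe MOVED retry against the new table.

                // CAS failed: a writer prepended a new entry, rescan the chain.
            }
        }
    }
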
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimap.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimap.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimap.java
deleted file mode 100644
index c32e9af..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimap.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.shuffle.collections;
-
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
-import org.apache.ignite.internal.util.typedef.internal.A;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-/**
- * Hash multimap.
- */
-public class HadoopHashMultimap extends HadoopHashMultimapBase {
-    /** */
-    private long[] tbl;
-
-    /** */
-    private int keys;
-
-    /**
-     * @param jobInfo Job info.
-     * @param mem Memory.
-     * @param cap Initial capacity.
-     */
-    public HadoopHashMultimap(HadoopJobInfo jobInfo, GridUnsafeMemory mem, int cap) {
-        super(jobInfo, mem);
-
-        assert U.isPow2(cap) : cap;
-
-        tbl = new long[cap];
-    }
-
-    /** {@inheritDoc} */
-    @Override public Adder startAdding(HadoopTaskContext ctx) throws IgniteCheckedException {
-        return new AdderImpl(ctx);
-    }
-
-    /**
-     * Rehash.
-     */
-    private void rehash() {
-        long[] newTbl = new long[tbl.length << 1];
-
-        int newMask = newTbl.length - 1;
-
-        for (long meta : tbl) {
-            while (meta != 0) {
-                long collision = collision(meta);
-
-                int idx = keyHash(meta) & newMask;
-
-                collision(meta, newTbl[idx]);
-
-                newTbl[idx] = meta;
-
-                meta = collision;
-            }
-        }
-
-        tbl = newTbl;
-    }
-
-    /**
-     * @return Keys count.
-     */
-    public int keys() {
-        return keys;
-    }
-
-    /** {@inheritDoc} */
-    @Override public int capacity() {
-        return tbl.length;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected long meta(int idx) {
-        return tbl[idx];
-    }
-
-    /**
-     * Adder.
-     */
-    private class AdderImpl extends AdderBase {
-        /** */
-        private final Reader keyReader;
-
-        /**
-         * @param ctx Task context.
-         * @throws IgniteCheckedException If failed.
-         */
-        protected AdderImpl(HadoopTaskContext ctx) throws IgniteCheckedException {
-            super(ctx);
-
-            keyReader = new Reader(keySer);
-        }
-
-        /**
-         * @param keyHash Key hash.
-         * @param keySize Key size.
-         * @param keyPtr Key pointer.
-         * @param valPtr Value page pointer.
-         * @param collisionPtr Pointer to meta with hash collision.
-         * @return Created meta page pointer.
-         */
-        private long createMeta(int keyHash, int keySize, long keyPtr, long valPtr, long collisionPtr) {
-            long meta = allocate(32);
-
-            mem.writeInt(meta, keyHash);
-            mem.writeInt(meta + 4, keySize);
-            mem.writeLong(meta + 8, keyPtr);
-            mem.writeLong(meta + 16, valPtr);
-            mem.writeLong(meta + 24, collisionPtr);
-
-            return meta;
-        }
-
-        /** {@inheritDoc} */
-        @Override public void write(Object key, Object val) throws IgniteCheckedException {
-            A.notNull(val, "val");
-
-            int keyHash = U.hash(key.hashCode());
-
-            // Write value.
-            long valPtr = write(12, val, valSer);
-            int valSize = writtenSize() - 12;
-
-            valueSize(valPtr, valSize);
-
-            // Find position in table.
-            int idx = keyHash & (tbl.length - 1);
-
-            long meta = tbl[idx];
-
-            // Search for our key in collisions.
-            while (meta != 0) {
-                if (keyHash(meta) == keyHash && key.equals(keyReader.readKey(meta))) { // Found key.
-                    nextValue(valPtr, value(meta));
-
-                    value(meta, valPtr);
-
-                    return;
-                }
-
-                meta = collision(meta);
-            }
-
-            // Write key.
-            long keyPtr = write(0, key, keySer);
-            int keySize = writtenSize();
-
-            nextValue(valPtr, 0);
-
-            tbl[idx] = createMeta(keyHash, keySize, keyPtr, valPtr, tbl[idx]);
-
-            if (++keys > (tbl.length >>> 2) * 3)
-                rehash();
-        }
-    }
-}
\ No newline at end of file

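Two invariants drive HadoopHashMultimap above: the capacity is always a power of two, so
'hash & (length - 1)' replaces a modulo when picking a bucket, and the table doubles once
the key count exceeds 3/4 of the capacity. A small illustrative sketch of both invariants
(the class name PowerOfTwoTable is made up for this example):

    /** Demonstrates the mask indexing and the 3/4 load-factor check used above. */
    public class PowerOfTwoTable {
        public static void main(String[] args) {
            int cap = 16;                 // Must be a power of two.
            int hash = "someKey".hashCode();

            int idx = hash & (cap - 1);   // Same bucket as the non-negative hash % cap.

            assert idx == Math.floorMod(hash, cap);

            int keys = 13;                // 13 > 12 == (16 >>> 2) * 3, so grow.

            if (keys > (cap >>> 2) * 3)
                cap <<= 1;                // Double the capacity, as rehash() does.

            System.out.println("idx=" + idx + ", newCap=" + cap);
        }
    }
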
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimapBase.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimapBase.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimapBase.java
deleted file mode 100644
index 8d9b3c3..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimapBase.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.shuffle.collections;
-
-import java.util.Iterator;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopSerialization;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
-import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
-
-/**
- * Base class for hash multimaps.
- */
-public abstract class HadoopHashMultimapBase extends HadoopMultimapBase {
-    /**
-     * @param jobInfo Job info.
-     * @param mem Memory.
-     */
-    protected HadoopHashMultimapBase(HadoopJobInfo jobInfo, GridUnsafeMemory mem) {
-        super(jobInfo, mem);
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean visit(boolean ignoreLastVisited, Visitor v) throws IgniteCheckedException {
-        throw new UnsupportedOperationException("visit");
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopTaskInput input(HadoopTaskContext taskCtx) throws IgniteCheckedException {
-        return new Input(taskCtx);
-    }
-
-    /**
-     * @return Hash table capacity.
-     */
-    public abstract int capacity();
-
-    /**
-     * @param idx Index in hash table.
-     * @return Meta page pointer.
-     */
-    protected abstract long meta(int idx);
-
-    /**
-     * @param meta Meta pointer.
-     * @return Key hash.
-     */
-    protected int keyHash(long meta) {
-        return mem.readInt(meta);
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @return Key size.
-     */
-    protected int keySize(long meta) {
-        return mem.readInt(meta + 4);
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @return Key pointer.
-     */
-    protected long key(long meta) {
-        return mem.readLong(meta + 8);
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @return Value pointer.
-     */
-    protected long value(long meta) {
-        return mem.readLong(meta + 16);
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @param val Value pointer.
-     */
-    protected void value(long meta, long val) {
-        mem.writeLong(meta + 16, val);
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @return Collision pointer.
-     */
-    protected long collision(long meta) {
-        return mem.readLong(meta + 24);
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @param collision Collision pointer.
-     */
-    protected void collision(long meta, long collision) {
-        assert meta != collision : meta;
-
-        mem.writeLong(meta + 24, collision);
-    }
-
-    /**
-     * Reader for key and value.
-     */
-    protected class Reader extends ReaderBase {
-        /**
-         * @param ser Serialization.
-         */
-        protected Reader(HadoopSerialization ser) {
-            super(ser);
-        }
-
-        /**
-         * @param meta Meta pointer.
-         * @return Key.
-         */
-        public Object readKey(long meta) {
-            assert meta > 0 : meta;
-
-            try {
-                return read(key(meta), keySize(meta));
-            }
-            catch (IgniteCheckedException e) {
-                throw new IgniteException(e);
-            }
-        }
-    }
-
-    /**
-     * Task input.
-     */
-    protected class Input implements HadoopTaskInput {
-        /** */
-        private int idx = -1;
-
-        /** */
-        private long metaPtr;
-
-        /** */
-        private final int cap;
-
-        /** */
-        private final Reader keyReader;
-
-        /** */
-        private final Reader valReader;
-
-        /**
-         * @param taskCtx Task context.
-         * @throws IgniteCheckedException If failed.
-         */
-        public Input(HadoopTaskContext taskCtx) throws IgniteCheckedException {
-            cap = capacity();
-
-            keyReader = new Reader(taskCtx.keySerialization());
-            valReader = new Reader(taskCtx.valueSerialization());
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean next() {
-            if (metaPtr != 0) {
-                metaPtr = collision(metaPtr);
-
-                if (metaPtr != 0)
-                    return true;
-            }
-
-            while (++idx < cap) { // Scan table.
-                metaPtr = meta(idx);
-
-                if (metaPtr != 0)
-                    return true;
-            }
-
-            return false;
-        }
-
-        /** {@inheritDoc} */
-        @Override public Object key() {
-            return keyReader.readKey(metaPtr);
-        }
-
-        /** {@inheritDoc} */
-        @Override public Iterator<?> values() {
-            return new ValueIterator(value(metaPtr), valReader);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void close() throws IgniteCheckedException {
-            keyReader.close();
-            valReader.close();
-        }
-    }
-}
\ No newline at end of file

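HadoopHashMultimapBase fixes the layout of a "meta page": key hash at offset 0, key size
at 4, key pointer at 8, value pointer at 16 and collision pointer at 24 (the concurrent
variant appends a last-visited-value pointer at 32, hence its allocate(40)). The sketch
below models that record layout with a ByteBuffer standing in for GridUnsafeMemory; the
class MetaPage is illustrative, not Ignite API:

    import java.nio.ByteBuffer;

    /** Meta-page layout of the hash multimaps, modeled on a heap buffer. */
    public class MetaPage {
        static final int HASH = 0, KEY_SIZE = 4, KEY_PTR = 8, VAL_PTR = 16, COLLISION = 24;

        public static void main(String[] args) {
            ByteBuffer meta = ByteBuffer.allocate(32); // Mirrors allocate(32) above.

            meta.putInt(HASH, 0xCAFE);
            meta.putInt(KEY_SIZE, 12);
            meta.putLong(KEY_PTR, 1024L);
            meta.putLong(VAL_PTR, 2048L);
            meta.putLong(COLLISION, 0L); // 0 terminates the collision chain.

            // Equivalents of keyHash(meta) and value(meta):
            System.out.println(meta.getInt(HASH) + " -> " + meta.getLong(VAL_PTR));
        }
    }
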
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimap.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimap.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimap.java
deleted file mode 100644
index 5b71c47..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimap.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.shuffle.collections;
-
-import java.io.DataInput;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskOutput;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Multimap for hadoop intermediate results.
- */
-@SuppressWarnings("PublicInnerClass")
-public interface HadoopMultimap extends AutoCloseable {
-    /**
-     * Incrementally visits all the keys and values in the map.
-     *
-     * @param ignoreLastVisited Flag indicating that visiting must be started from the beginning.
-     * @param v Visitor.
-     * @return {@code false} If visiting was impossible.
-     */
-    public boolean visit(boolean ignoreLastVisited, Visitor v) throws IgniteCheckedException;
-
-    /**
-     * @param ctx Task context.
-     * @return Adder.
-     * @throws IgniteCheckedException If failed.
-     */
-    public Adder startAdding(HadoopTaskContext ctx) throws IgniteCheckedException;
-
-    /**
-     * @param taskCtx Task context.
-     * @return Task input.
-     * @throws IgniteCheckedException If failed.
-     */
-    public HadoopTaskInput input(HadoopTaskContext taskCtx)
-        throws IgniteCheckedException;
-
-    /** {@inheritDoc} */
-    @Override public void close();
-
-    /**
-     * Adder.
-     */
-    public interface Adder extends HadoopTaskOutput {
-        /**
-         * @param in Data input.
-         * @param reuse Reusable key.
-         * @return Key.
-         * @throws IgniteCheckedException If failed.
-         */
-        public Key addKey(DataInput in, @Nullable Key reuse) throws IgniteCheckedException;
-    }
-
-    /**
-     * Key to add values to.
-     */
-    public interface Key {
-        /**
-         * @param val Value.
-         */
-        public void add(Value val);
-    }
-
-    /**
-     * Value.
-     */
-    public interface Value {
-        /**
-         * @return Size in bytes.
-         */
-        public int size();
-
-        /**
-         * @param ptr Pointer.
-         */
-        public void copyTo(long ptr);
-    }
-
-    /**
-     * Key and values visitor.
-     */
-    public interface Visitor {
-        /**
-         * @param keyPtr Key pointer.
-         * @param keySize Key size.
-         */
-        public void onKey(long keyPtr, int keySize) throws IgniteCheckedException;
-
-        /**
-         * @param valPtr Value pointer.
-         * @param valSize Value size.
-         */
-        public void onValue(long valPtr, int valSize) throws IgniteCheckedException;
-    }
-}
\ No newline at end of file

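Tying the interface together: a producer obtains an Adder via startAdding() and pushes
key/value pairs, while a consumer either pulls a HadoopTaskInput or walks the map with a
Visitor. Below is a hedged usage sketch against the interface exactly as declared above;
the enclosing class, the multimap instance and the task context are assumed to be
supplied by the caller and are not shown:

    // Assumes 'map' is a HadoopMultimap and 'ctx' a HadoopTaskContext from the caller.
    void writeAndVisit(HadoopMultimap map, HadoopTaskContext ctx) throws IgniteCheckedException {
        HadoopMultimap.Adder adder = map.startAdding(ctx);

        adder.write("key", "value"); // write() is inherited from HadoopTaskOutput.

        adder.close(); // Assumed to be declared on HadoopTaskOutput; AdderImpl overrides it above.

        map.visit(true, new HadoopMultimap.Visitor() {
            @Override public void onKey(long keyPtr, int keySize) {
                // Invoked once per distinct key with its raw offheap pointer and size.
            }

            @Override public void onValue(long valPtr, int valSize) {
                // Invoked once per value of the most recently visited key.
            }
        });
    }
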

[12/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskExecutor.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskExecutor.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskExecutor.java
deleted file mode 100644
index dc5874d..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskExecutor.java
+++ /dev/null
@@ -1,976 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor.external;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.locks.ReentrantLock;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteLogger;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.processors.hadoop.HadoopContext;
-import org.apache.ignite.internal.processors.hadoop.HadoopJob;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobPhase;
-import org.apache.ignite.internal.processors.hadoop.HadoopMapReducePlan;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskType;
-import org.apache.ignite.internal.processors.hadoop.jobtracker.HadoopJobMetadata;
-import org.apache.ignite.internal.processors.hadoop.jobtracker.HadoopJobTracker;
-import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskExecutorAdapter;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskState;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskStatus;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.child.HadoopExternalProcessStarter;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication.HadoopExternalCommunication;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication.HadoopMessageListener;
-import org.apache.ignite.internal.util.GridSpinReadWriteLock;
-import org.apache.ignite.internal.util.future.GridFutureAdapter;
-import org.apache.ignite.internal.util.typedef.CI1;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lang.IgniteBiTuple;
-import org.apache.ignite.spi.IgnitePortProtocol;
-import org.jetbrains.annotations.Nullable;
-import org.jsr166.ConcurrentHashMap8;
-import org.jsr166.ConcurrentLinkedDeque8;
-
-import static org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskState.CRASHED;
-import static org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskState.FAILED;
-
-/**
- * External process registry. Handles external process lifecycle.
- */
-public class HadoopExternalTaskExecutor extends HadoopTaskExecutorAdapter {
-    /** Hadoop context. */
-    private HadoopContext ctx;
-
-    /** */
-    private String javaCmd;
-
-    /** Logger. */
-    private IgniteLogger log;
-
-    /** Node process descriptor. */
-    private HadoopProcessDescriptor nodeDesc;
-
-    /** Output base. */
-    private File outputBase;
-
-    /** Path separator. */
-    private String pathSep;
-
-    /** Hadoop external communication. */
-    private HadoopExternalCommunication comm;
-
-    /** Running processes mapped by process ID. */
-    private final ConcurrentMap<UUID, HadoopProcess> runningProcsByProcId = new ConcurrentHashMap8<>();
-
-    /** Running processes mapped by job ID. */
-    private final ConcurrentMap<HadoopJobId, HadoopProcess> runningProcsByJobId = new ConcurrentHashMap8<>();
-
-    /** Busy lock. */
-    private final GridSpinReadWriteLock busyLock = new GridSpinReadWriteLock();
-
-    /** Job tracker. */
-    private HadoopJobTracker jobTracker;
-
-    /** {@inheritDoc} */
-    @Override public void start(HadoopContext ctx) throws IgniteCheckedException {
-        this.ctx = ctx;
-
-        log = ctx.kernalContext().log(HadoopExternalTaskExecutor.class);
-
-        outputBase = U.resolveWorkDirectory("hadoop", false);
-
-        pathSep = System.getProperty("path.separator", U.isWindows() ? ";" : ":");
-
-        initJavaCommand();
-
-        comm = new HadoopExternalCommunication(
-            ctx.localNodeId(),
-            UUID.randomUUID(),
-            ctx.kernalContext().config().getMarshaller(),
-            log,
-            ctx.kernalContext().getSystemExecutorService(),
-            ctx.kernalContext().gridName());
-
-        comm.setListener(new MessageListener());
-
-        comm.start();
-
-        nodeDesc = comm.localProcessDescriptor();
-
-        ctx.kernalContext().ports().registerPort(nodeDesc.tcpPort(), IgnitePortProtocol.TCP,
-            HadoopExternalTaskExecutor.class);
-
-        if (nodeDesc.sharedMemoryPort() != -1)
-            ctx.kernalContext().ports().registerPort(nodeDesc.sharedMemoryPort(), IgnitePortProtocol.TCP,
-                HadoopExternalTaskExecutor.class);
-
-        jobTracker = ctx.jobTracker();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void stop(boolean cancel) {
-        busyLock.writeLock();
-
-        try {
-            comm.stop();
-        }
-        catch (IgniteCheckedException e) {
-            U.error(log, "Failed to gracefully stop external hadoop communication server (will shutdown anyway)", e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onJobStateChanged(final HadoopJobMetadata meta) {
-        final HadoopProcess proc = runningProcsByJobId.get(meta.jobId());
-
-        // If we have a local process for this job.
-        if (proc != null) {
-            if (log.isDebugEnabled())
-                log.debug("Updating job information for remote task process [proc=" + proc + ", meta=" + meta + ']');
-
-            if (meta.phase() == HadoopJobPhase.PHASE_COMPLETE) {
-                if (log.isDebugEnabled())
-                    log.debug("Completed job execution, will terminate child process [jobId=" + meta.jobId() +
-                        ", proc=" + proc + ']');
-
-                runningProcsByJobId.remove(meta.jobId());
-                runningProcsByProcId.remove(proc.descriptor().processId());
-
-                proc.terminate();
-
-                return;
-            }
-
-            if (proc.initFut.isDone()) {
-                if (!proc.initFut.isFailed())
-                    sendJobInfoUpdate(proc, meta);
-                else if (log.isDebugEnabled())
-                    log.debug("Failed to initialize child process (will skip job state notification) " +
-                        "[jobId=" + meta.jobId() + ", meta=" + meta + ']');
-            }
-            else {
-                proc.initFut.listen(new CI1<IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>>>() {
-                    @Override
-                    public void apply(IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>> f) {
-                        try {
-                            f.get();
-
-                            sendJobInfoUpdate(proc, meta);
-                        }
-                        catch (IgniteCheckedException e) {
-                            if (log.isDebugEnabled())
-                                log.debug("Failed to initialize child process (will skip job state notification) " +
-                                    "[jobId=" + meta.jobId() + ", meta=" + meta + ", err=" + e + ']');
-                        }
-
-                    }
-                });
-            }
-        }
-        else if (ctx.isParticipating(meta)) {
-            HadoopJob job;
-
-            try {
-                job = jobTracker.job(meta.jobId(), meta.jobInfo());
-            }
-            catch (IgniteCheckedException e) {
-                U.error(log, "Failed to get job: " + meta.jobId(), e);
-
-                return;
-            }
-
-            startProcess(job, meta.mapReducePlan());
-        }
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("ConstantConditions")
-    @Override public void run(final HadoopJob job, final Collection<HadoopTaskInfo> tasks) throws IgniteCheckedException {
-        if (!busyLock.tryReadLock()) {
-            if (log.isDebugEnabled())
-                log.debug("Failed to start hadoop tasks (grid is stopping, will ignore).");
-
-            return;
-        }
-
-        try {
-            HadoopProcess proc = runningProcsByJobId.get(job.id());
-
-            HadoopTaskType taskType = F.first(tasks).type();
-
-            if (taskType == HadoopTaskType.SETUP || taskType == HadoopTaskType.ABORT ||
-                taskType == HadoopTaskType.COMMIT) {
-                if (proc == null || proc.terminated()) {
-                    runningProcsByJobId.remove(job.id(), proc);
-
-                    // Start new process for ABORT task since previous processes were killed.
-                    proc = startProcess(job, jobTracker.plan(job.id()));
-
-                    if (log.isDebugEnabled())
-                        log.debug("Starting new process for maintenance task [jobId=" + job.id() +
-                            ", proc=" + proc + ", taskType=" + taskType + ']');
-                }
-            }
-            else
-                assert proc != null : "Missing started process for task execution request: " + job.id() +
-                    ", tasks=" + tasks;
-
-            final HadoopProcess proc0 = proc;
-
-            proc.initFut.listen(new CI1<IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>>>() {
-                @Override public void apply(
-                    IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>> f) {
-                    if (!busyLock.tryReadLock())
-                        return;
-
-                    try {
-                        f.get();
-
-                        proc0.addTasks(tasks);
-
-                        if (log.isDebugEnabled())
-                            log.debug("Sending task execution request to child process [jobId=" + job.id() +
-                                ", proc=" + proc0 + ", tasks=" + tasks + ']');
-
-                        sendExecutionRequest(proc0, job, tasks);
-                    }
-                    catch (IgniteCheckedException e) {
-                        notifyTasksFailed(tasks, FAILED, e);
-                    }
-                    finally {
-                        busyLock.readUnlock();
-                    }
-                }
-            });
-        }
-        finally {
-            busyLock.readUnlock();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void cancelTasks(HadoopJobId jobId) {
-        HadoopProcess proc = runningProcsByJobId.get(jobId);
-
-        if (proc != null)
-            proc.terminate();
-    }
-
-    /**
-     * Sends execution request to remote node.
-     *
-     * @param proc Process to send request to.
-     * @param job Job instance.
-     * @param tasks Collection of tasks to execute in started process.
-     */
-    private void sendExecutionRequest(HadoopProcess proc, HadoopJob job, Collection<HadoopTaskInfo> tasks)
-        throws IgniteCheckedException {
-        // Must synchronize since the process may crash concurrently and onConnectionLost() would be invoked.
-        proc.lock();
-
-        try {
-            if (proc.terminated()) {
-                notifyTasksFailed(tasks, CRASHED, null);
-
-                return;
-            }
-
-            HadoopTaskExecutionRequest req = new HadoopTaskExecutionRequest();
-
-            req.jobId(job.id());
-            req.jobInfo(job.info());
-            req.tasks(tasks);
-
-            comm.sendMessage(proc.descriptor(), req);
-        }
-        finally {
-            proc.unlock();
-        }
-    }
-
-    /**
-     * @return External task metadata.
-     */
-    private HadoopExternalTaskMetadata buildTaskMeta() {
-        HadoopExternalTaskMetadata meta = new HadoopExternalTaskMetadata();
-
-        meta.classpath(Arrays.asList(System.getProperty("java.class.path").split(File.pathSeparator)));
-        meta.jvmOptions(Arrays.asList("-Xmx1g", "-ea", "-XX:+UseConcMarkSweepGC", "-XX:+CMSClassUnloadingEnabled",
-            "-DIGNITE_HOME=" + U.getIgniteHome()));
-
-        return meta;
-    }
-
-    /**
-     * @param tasks Tasks to notify about.
-     * @param state Fail state.
-     * @param e Optional error.
-     */
-    private void notifyTasksFailed(Iterable<HadoopTaskInfo> tasks, HadoopTaskState state, Throwable e) {
-        HadoopTaskStatus fail = new HadoopTaskStatus(state, e);
-
-        for (HadoopTaskInfo task : tasks)
-            jobTracker.onTaskFinished(task, fail);
-    }
-
-    /**
-     * Starts process template that will be ready to execute Hadoop tasks.
-     *
-     * @param job Job instance.
-     * @param plan Map reduce plan.
-     * @return Started process.
-     */
-    private HadoopProcess startProcess(final HadoopJob job, final HadoopMapReducePlan plan) {
-        final UUID childProcId = UUID.randomUUID();
-
-        HadoopJobId jobId = job.id();
-
-        final HadoopProcessFuture fut = new HadoopProcessFuture(childProcId, jobId);
-
-        final HadoopProcess proc = new HadoopProcess(jobId, fut, plan.reducers(ctx.localNodeId()));
-
-        HadoopProcess old = runningProcsByJobId.put(jobId, proc);
-
-        assert old == null;
-
-        old = runningProcsByProcId.put(childProcId, proc);
-
-        assert old == null;
-
-        ctx.kernalContext().closure().runLocalSafe(new Runnable() {
-            @Override public void run() {
-                if (!busyLock.tryReadLock()) {
-                    fut.onDone(new IgniteCheckedException("Failed to start external process (grid is stopping)."));
-
-                    return;
-                }
-
-                try {
-                    HadoopExternalTaskMetadata startMeta = buildTaskMeta();
-
-                    if (log.isDebugEnabled())
-                        log.debug("Created hadoop child process metadata for job [job=" + job +
-                            ", childProcId=" + childProcId + ", taskMeta=" + startMeta + ']');
-
-                    Process proc = startJavaProcess(childProcId, startMeta, job);
-
-                    BufferedReader rdr = new BufferedReader(new InputStreamReader(proc.getInputStream()));
-
-                    String line;
-
-                    // Read all of the process output.
-                    while ((line = rdr.readLine()) != null) {
-                        if (log.isDebugEnabled())
-                            log.debug("Tracing process output: " + line);
-
-                        if ("Started".equals(line)) {
-                            // Process started successfully, it should not write anything more to the output stream.
-                            if (log.isDebugEnabled())
-                                log.debug("Successfully started child process [childProcId=" + childProcId +
-                                    ", meta=" + job + ']');
-
-                            fut.onProcessStarted(proc);
-
-                            break;
-                        }
-                        else if ("Failed".equals(line)) {
-                            StringBuilder sb = new StringBuilder("Failed to start child process: " + job + "\n");
-
-                            while ((line = rdr.readLine()) != null)
-                                sb.append("    ").append(line).append("\n");
-
-                            // Cut last character.
-                            sb.setLength(sb.length() - 1);
-
-                            log.warning(sb.toString());
-
-                            fut.onDone(new IgniteCheckedException(sb.toString()));
-
-                            break;
-                        }
-                    }
-                }
-                catch (Throwable e) {
-                    fut.onDone(new IgniteCheckedException("Failed to initialize child process: " + job, e));
-
-                    if (e instanceof Error)
-                        throw (Error)e;
-                }
-                finally {
-                    busyLock.readUnlock();
-                }
-            }
-        }, true);
-
-        fut.listen(new CI1<IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>>>() {
-            @Override public void apply(IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>> f) {
-                try {
-                    // Make sure there were no exceptions.
-                    f.get();
-
-                    prepareForJob(proc, job, plan);
-                }
-                catch (IgniteCheckedException ignore) {
-                    // Exception is printed in future's onDone() method.
-                }
-            }
-        });
-
-        return proc;
-    }
-
-    /**
-     * Checks that the local java command is available.
-     *
-     * @throws IgniteCheckedException If initialization failed.
-     */
-    private void initJavaCommand() throws IgniteCheckedException {
-        String javaHome = System.getProperty("java.home");
-
-        if (javaHome == null)
-            javaHome = System.getenv("JAVA_HOME");
-
-        if (javaHome == null)
-            throw new IgniteCheckedException("Failed to locate JAVA_HOME.");
-
-        javaCmd = javaHome + File.separator + "bin" + File.separator + (U.isWindows() ? "java.exe" : "java");
-
-        try {
-            Process proc = new ProcessBuilder(javaCmd, "-version").redirectErrorStream(true).start();
-
-            Collection<String> out = readProcessOutput(proc);
-
-            int res = proc.waitFor();
-
-            if (res != 0)
-                throw new IgniteCheckedException("Failed to execute 'java -version' command (process finished with nonzero " +
-                    "code) [exitCode=" + res + ", javaCmd='" + javaCmd + "', msg=" + F.first(out) + ']');
-
-            if (log.isInfoEnabled()) {
-                log.info("Will use java for external task execution: ");
-
-                for (String s : out)
-                    log.info("    " + s);
-            }
-        }
-        catch (IOException e) {
-            throw new IgniteCheckedException("Failed to check java for external task execution.", e);
-        }
-        catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-
-            throw new IgniteCheckedException("Failed to wait for process completion (thread got interrupted).", e);
-        }
-    }
-
-    /**
-     * Reads process output line-by-line.
-     *
-     * @param proc Process to read output from.
-     * @return Read lines.
-     * @throws IOException If read failed.
-     */
-    private Collection<String> readProcessOutput(Process proc) throws IOException {
-        BufferedReader rdr = new BufferedReader(new InputStreamReader(proc.getInputStream()));
-
-        Collection<String> res = new ArrayList<>();
-
-        String s;
-
-        while ((s = rdr.readLine()) != null)
-            res.add(s);
-
-        return res;
-    }
-
-    /**
-     * Builds process from metadata.
-     *
-     * @param childProcId Child process ID.
-     * @param startMeta Metadata.
-     * @param job Job.
-     * @return Started process.
-     */
-    private Process startJavaProcess(UUID childProcId, HadoopExternalTaskMetadata startMeta,
-        HadoopJob job) throws Exception {
-        String outFldr = jobWorkFolder(job.id()) + File.separator + childProcId;
-
-        if (log.isDebugEnabled())
-            log.debug("Will write process log output to: " + outFldr);
-
-        List<String> cmd = new ArrayList<>();
-
-        File workDir = U.resolveWorkDirectory("", false);
-
-        cmd.add(javaCmd);
-        cmd.addAll(startMeta.jvmOptions());
-        cmd.add("-cp");
-        cmd.add(buildClasspath(startMeta.classpath()));
-        cmd.add(HadoopExternalProcessStarter.class.getName());
-        cmd.add("-cpid");
-        cmd.add(String.valueOf(childProcId));
-        cmd.add("-ppid");
-        cmd.add(String.valueOf(nodeDesc.processId()));
-        cmd.add("-nid");
-        cmd.add(String.valueOf(nodeDesc.parentNodeId()));
-        cmd.add("-addr");
-        cmd.add(nodeDesc.address());
-        cmd.add("-tport");
-        cmd.add(String.valueOf(nodeDesc.tcpPort()));
-        cmd.add("-sport");
-        cmd.add(String.valueOf(nodeDesc.sharedMemoryPort()));
-        cmd.add("-out");
-        cmd.add(outFldr);
-        cmd.add("-wd");
-        cmd.add(workDir.getAbsolutePath());
-
-        return new ProcessBuilder(cmd)
-            .redirectErrorStream(true)
-            .directory(workDir)
-            .start();
-    }
-
-    /**
-     * Gets job work folder.
-     *
-     * @param jobId Job ID.
-     * @return Job work folder.
-     */
-    private String jobWorkFolder(HadoopJobId jobId) {
-        return outputBase + File.separator + "Job_" + jobId;
-    }
-
-    /**
-     * @param cp Classpath collection.
-     * @return Classpath string.
-     */
-    private String buildClasspath(Collection<String> cp) {
-        assert !cp.isEmpty();
-
-        StringBuilder sb = new StringBuilder();
-
-        for (String s : cp)
-            sb.append(s).append(pathSep);
-
-        sb.setLength(sb.length() - 1);
-
-        return sb.toString();
-    }
-
-    /**
-     * Sends job info update request to remote process.
-     *
-     * @param proc Process to send request to.
-     * @param meta Job metadata.
-     */
-    private void sendJobInfoUpdate(HadoopProcess proc, HadoopJobMetadata meta) {
-        Map<Integer, HadoopProcessDescriptor> rdcAddrs = meta.reducersAddresses();
-
-        int rdcNum = meta.mapReducePlan().reducers();
-
-        HadoopProcessDescriptor[] addrs = null;
-
-        if (rdcAddrs != null && rdcAddrs.size() == rdcNum) {
-            addrs = new HadoopProcessDescriptor[rdcNum];
-
-            for (int i = 0; i < rdcNum; i++) {
-                HadoopProcessDescriptor desc = rdcAddrs.get(i);
-
-                assert desc != null : "Missing reducing address [meta=" + meta + ", rdc=" + i + ']';
-
-                addrs[i] = desc;
-            }
-        }
-
-        try {
-            comm.sendMessage(proc.descriptor(), new HadoopJobInfoUpdateRequest(proc.jobId, meta.phase(), addrs));
-        }
-        catch (IgniteCheckedException e) {
-            if (!proc.terminated()) {
-                log.error("Failed to send job state update message to remote child process (will kill the process) " +
-                    "[jobId=" + proc.jobId + ", meta=" + meta + ']', e);
-
-                proc.terminate();
-            }
-        }
-    }
-
-    /**
-     * Sends prepare request to remote process.
-     *
-     * @param proc Process to send request to.
-     * @param job Job.
-     * @param plan Map reduce plan.
-     */
-    private void prepareForJob(HadoopProcess proc, HadoopJob job, HadoopMapReducePlan plan) {
-        try {
-            comm.sendMessage(proc.descriptor(), new HadoopPrepareForJobRequest(job.id(), job.info(),
-                plan.reducers(), plan.reducers(ctx.localNodeId())));
-        }
-        catch (IgniteCheckedException e) {
-            U.error(log, "Failed to send job prepare request to remote process [proc=" + proc + ", job=" + job +
-                ", plan=" + plan + ']', e);
-
-            proc.terminate();
-        }
-    }
-
-    /**
-     * Processes task finished message.
-     *
-     * @param desc Remote process descriptor.
-     * @param taskMsg Task finished message.
-     */
-    private void processTaskFinishedMessage(HadoopProcessDescriptor desc, HadoopTaskFinishedMessage taskMsg) {
-        HadoopProcess proc = runningProcsByProcId.get(desc.processId());
-
-        if (proc != null)
-            proc.removeTask(taskMsg.taskInfo());
-
-        jobTracker.onTaskFinished(taskMsg.taskInfo(), taskMsg.status());
-    }
-
-    /**
-     *
-     */
-    private class MessageListener implements HadoopMessageListener {
-        /** {@inheritDoc} */
-        @Override public void onMessageReceived(HadoopProcessDescriptor desc, HadoopMessage msg) {
-            if (!busyLock.tryReadLock())
-                return;
-
-            try {
-                if (msg instanceof HadoopProcessStartedAck) {
-                    HadoopProcess proc = runningProcsByProcId.get(desc.processId());
-
-                    assert proc != null : "Missing child process for processId: " + desc;
-
-                    HadoopProcessFuture fut = proc.initFut;
-
-                    if (fut != null)
-                        fut.onReplyReceived(desc);
-                    else // Safety.
-                        log.warning("Failed to find process start future (will ignore): " + desc);
-                }
-                else if (msg instanceof HadoopTaskFinishedMessage) {
-                    HadoopTaskFinishedMessage taskMsg = (HadoopTaskFinishedMessage)msg;
-
-                    processTaskFinishedMessage(desc, taskMsg);
-                }
-                else
-                    log.warning("Unexpected message received by node [desc=" + desc + ", msg=" + msg + ']');
-            }
-            finally {
-                busyLock.readUnlock();
-            }
-        }
-
-        /** {@inheritDoc} */
-        @Override public void onConnectionLost(HadoopProcessDescriptor desc) {
-            if (!busyLock.tryReadLock())
-                return;
-
-            try {
-                if (desc == null) {
-                    U.warn(log, "Handshake failed.");
-
-                    return;
-                }
-
-                // Notify job tracker about failed tasks.
-                HadoopProcess proc = runningProcsByProcId.get(desc.processId());
-
-                if (proc != null) {
-                    Collection<HadoopTaskInfo> tasks = proc.tasks();
-
-                    if (!F.isEmpty(tasks)) {
-                        log.warning("Lost connection with alive process (will terminate): " + desc);
-
-                        HadoopTaskStatus status = new HadoopTaskStatus(CRASHED,
-                            new IgniteCheckedException("Failed to run tasks (external process finished unexpectedly): " + desc));
-
-                        for (HadoopTaskInfo info : tasks)
-                            jobTracker.onTaskFinished(info, status);
-
-                        runningProcsByJobId.remove(proc.jobId(), proc);
-                    }
-
-                    // Safety.
-                    proc.terminate();
-                }
-            }
-            finally {
-                busyLock.readUnlock();
-            }
-        }
-    }
-
-    /**
-     * Hadoop process.
-     */
-    private static class HadoopProcess extends ReentrantLock {
-        /** */
-        private static final long serialVersionUID = 0L;
-
-        /** Job ID. */
-        private final HadoopJobId jobId;
-
-        /** Process. */
-        private Process proc;
-
-        /** Init future. Completes when process is ready to receive messages. */
-        private final HadoopProcessFuture initFut;
-
-        /** Process descriptor. */
-        private HadoopProcessDescriptor procDesc;
-
-        /** Reducers planned for this process. */
-        private Collection<Integer> reducers;
-
-        /** Tasks. */
-        private final Collection<HadoopTaskInfo> tasks = new ConcurrentLinkedDeque8<>();
-
-        /** Terminated flag. */
-        private volatile boolean terminated;
-
-        /**
-         * @param jobId Job ID.
-         * @param initFut Init future.
-         * @param reducers Reducers planned for this process.
-         */
-        private HadoopProcess(HadoopJobId jobId, HadoopProcessFuture initFut,
-            int[] reducers) {
-            this.jobId = jobId;
-            this.initFut = initFut;
-
-            if (!F.isEmpty(reducers)) {
-                this.reducers = new ArrayList<>(reducers.length);
-
-                for (int r : reducers)
-                    this.reducers.add(r);
-            }
-        }
-
-        /**
-         * @return Communication process descriptor.
-         */
-        private HadoopProcessDescriptor descriptor() {
-            return procDesc;
-        }
-
-        /**
-         * @return Job ID.
-         */
-        public HadoopJobId jobId() {
-            return jobId;
-        }
-
-        /**
-         * Initialized callback.
-         *
-         * @param proc Java process representation.
-         * @param procDesc Process descriptor.
-         */
-        private void onInitialized(Process proc, HadoopProcessDescriptor procDesc) {
-            this.proc = proc;
-            this.procDesc = procDesc;
-        }
-
-        /**
-         * Terminates process (kills it).
-         */
-        private void terminate() {
-            // Guard against concurrent message sending.
-            lock();
-
-            try {
-                terminated = true;
-
-                if (!initFut.isDone())
-                    initFut.listen(new CI1<IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>>>() {
-                        @Override public void apply(
-                            IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>> f) {
-                            proc.destroy();
-                        }
-                    });
-                else
-                    proc.destroy();
-            }
-            finally {
-                unlock();
-            }
-        }
-
-        /**
-         * @return Terminated flag.
-         */
-        private boolean terminated() {
-            return terminated;
-        }
-
-        /**
-         * Adds tasks to this process.
-         *
-         * @param tasks Tasks to add.
-         */
-        private void addTasks(Collection<HadoopTaskInfo> tasks) {
-            this.tasks.addAll(tasks);
-        }
-
-        /**
-         * Removes a task once it has completed.
-         *
-         * @param task Task to remove.
-         */
-        private void removeTask(HadoopTaskInfo task) {
-            tasks.remove(task); // 'tasks' is final and never null, so no guard is needed.
-        }
-
-        /**
-         * @return Collection of tasks.
-         */
-        private Collection<HadoopTaskInfo> tasks() {
-            return tasks;
-        }
-
-        /**
-         * @return Planned reducers.
-         */
-        private Collection<Integer> reducers() {
-            return reducers;
-        }
-
-        /** {@inheritDoc} */
-        @Override public String toString() {
-            return S.toString(HadoopProcess.class, this);
-        }
-    }
-
-    /**
-     * Process initialization future. Completes when both the process has started and the started-ack reply has arrived.
-     */
-    private class HadoopProcessFuture extends GridFutureAdapter<IgniteBiTuple<Process, HadoopProcessDescriptor>> {
-        /** */
-        private static final long serialVersionUID = 0L;
-
-        /** Child process ID. */
-        private UUID childProcId;
-
-        /** Job ID. */
-        private HadoopJobId jobId;
-
-        /** Process descriptor. */
-        private HadoopProcessDescriptor desc;
-
-        /** Running process. */
-        private Process proc;
-
-        /** Process started flag. */
-        private volatile boolean procStarted;
-
-        /** Reply received flag. */
-        private volatile boolean replyReceived;
-
-        /** Logger. */
-        private final IgniteLogger log = HadoopExternalTaskExecutor.this.log;
-
-        /**
-         * @param childProcId Child process ID.
-         * @param jobId Job ID.
-         */
-        private HadoopProcessFuture(UUID childProcId, HadoopJobId jobId) {
-            this.childProcId = childProcId;
-            this.jobId = jobId;
-        }
-
-        /**
-         * Process started callback.
-         *
-         * @param proc Started process.
-         */
-        public void onProcessStarted(Process proc) {
-            this.proc = proc;
-
-            procStarted = true;
-
-            if (procStarted && replyReceived)
-                onDone(F.t(proc, desc));
-        }
-
-        /**
-         * Reply received callback.
-         *
-         * @param desc Descriptor of the started child process.
-         */
-        public void onReplyReceived(HadoopProcessDescriptor desc) {
-            assert childProcId.equals(desc.processId());
-
-            this.desc = desc;
-
-            replyReceived = true;
-
-            if (procStarted && replyReceived)
-                onDone(F.t(proc, desc));
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean onDone(@Nullable IgniteBiTuple<Process, HadoopProcessDescriptor> res,
-            @Nullable Throwable err) {
-            if (err == null) {
-                HadoopProcess proc = runningProcsByProcId.get(childProcId);
-
-                assert proc != null;
-
-                assert proc.initFut == this;
-
-                proc.onInitialized(res.get1(), res.get2());
-
-                if (!F.isEmpty(proc.reducers()))
-                    jobTracker.onExternalMappersInitialized(jobId, proc.reducers(), desc);
-            }
-            else {
-                // Clean up since init failed.
-                runningProcsByJobId.remove(jobId);
-                runningProcsByProcId.remove(childProcId);
-            }
-
-            if (super.onDone(res, err)) {
-                if (err == null) {
-                    if (log.isDebugEnabled())
-                        log.debug("Initialized child process for external task execution [jobId=" + jobId +
-                            ", desc=" + desc + ", initTime=" + duration() + ']');
-                }
-                else
-                    U.error(log, "Failed to initialize child process for external task execution [jobId=" + jobId +
-                        ", desc=" + desc + ']', err);
-
-                return true;
-            }
-
-            return false;
-        }
-    }
-}
\ No newline at end of file
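
A note on HadoopProcessFuture above: the future completes only after both the local Process handle has been captured (onProcessStarted) and the started-ack reply has arrived from the child (onReplyReceived), in either order. Below is a minimal, self-contained sketch of that two-event completion idiom; the class and method names are illustrative, not part of the Ignite API.

    import java.util.concurrent.CompletableFuture;

    /** Hedged sketch of the two-event completion idiom used by HadoopProcessFuture. */
    public class TwoEventFuture<A, B> {
        private final CompletableFuture<Void> done = new CompletableFuture<>();

        private volatile A first;   // E.g. the spawned Process.
        private volatile B second;  // E.g. the reply descriptor.

        public void onFirst(A a) {
            first = a;

            tryComplete();
        }

        public void onSecond(B b) {
            second = b;

            tryComplete();
        }

        private void tryComplete() {
            // Fires only once both events were observed. CompletableFuture.complete()
            // is idempotent, so a racing double call is harmless.
            if (first != null && second != null)
                done.complete(null);
        }

        public CompletableFuture<Void> future() {
            return done;
        }
    }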

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskMetadata.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskMetadata.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskMetadata.java
deleted file mode 100644
index 27b0329..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskMetadata.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor.external;
-
-import java.util.Collection;
-import org.apache.ignite.internal.util.tostring.GridToStringInclude;
-import org.apache.ignite.internal.util.typedef.internal.S;
-
-/**
- * External task metadata (classpath, JVM options) needed to start external process execution.
- */
-public class HadoopExternalTaskMetadata {
-    /** Process classpath. */
-    private Collection<String> classpath;
-
-    /** JVM options. */
-    @GridToStringInclude
-    private Collection<String> jvmOpts;
-
-    /**
-     * @return JVM options.
-     */
-    public Collection<String> jvmOptions() {
-        return jvmOpts;
-    }
-
-    /**
-     * @param jvmOpts JVM options.
-     */
-    public void jvmOptions(Collection<String> jvmOpts) {
-        this.jvmOpts = jvmOpts;
-    }
-
-    /**
-     * @return Classpath.
-     */
-    public Collection<String> classpath() {
-        return classpath;
-    }
-
-    /**
-     * @param classpath Classpath.
-     */
-    public void classpath(Collection<String> classpath) {
-        this.classpath = classpath;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopExternalTaskMetadata.class, this);
-    }
-}
\ No newline at end of file
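
HadoopExternalTaskMetadata above is a plain mutable holder for the child JVM's classpath and options. Here is a hedged sketch of how a caller might populate it; the paths and flags are placeholders, not values Ignite actually computes, and the import assumes the pre-move package:

    import java.util.Arrays;

    import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopExternalTaskMetadata;

    public class MetadataExample {
        public static void main(String[] args) {
            HadoopExternalTaskMetadata meta = new HadoopExternalTaskMetadata();

            // Placeholder classpath entries and JVM options (illustrative only).
            meta.classpath(Arrays.asList(
                "/opt/ignite/libs/ignite-core.jar",
                "/opt/ignite/libs/ignite-hadoop.jar"));

            meta.jvmOptions(Arrays.asList("-Xmx1g", "-XX:+UseG1GC"));

            System.out.println(meta);
        }
    }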

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopJobInfoUpdateRequest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopJobInfoUpdateRequest.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopJobInfoUpdateRequest.java
deleted file mode 100644
index 96b3675..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopJobInfoUpdateRequest.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor.external;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobPhase;
-import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
-import org.apache.ignite.internal.util.tostring.GridToStringInclude;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-/**
- * Job info update request.
- */
-public class HadoopJobInfoUpdateRequest implements HadoopMessage {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** Job ID. */
-    @GridToStringInclude
-    private HadoopJobId jobId;
-
-    /** Job phase. */
-    @GridToStringInclude
-    private HadoopJobPhase jobPhase;
-
-    /** Reducer addresses. */
-    @GridToStringInclude
-    private HadoopProcessDescriptor[] reducersAddrs;
-
-    /**
-     * Constructor required by {@link Externalizable}.
-     */
-    public HadoopJobInfoUpdateRequest() {
-        // No-op.
-    }
-
-    /**
-     * @param jobId Job ID.
-     * @param jobPhase Job phase.
-     * @param reducersAddrs Reducer addresses.
-     */
-    public HadoopJobInfoUpdateRequest(HadoopJobId jobId, HadoopJobPhase jobPhase,
-        HadoopProcessDescriptor[] reducersAddrs) {
-        assert jobId != null;
-
-        this.jobId = jobId;
-        this.jobPhase = jobPhase;
-        this.reducersAddrs = reducersAddrs;
-    }
-
-    /**
-     * @return Job ID.
-     */
-    public HadoopJobId jobId() {
-        return jobId;
-    }
-
-    /**
-     * @return Job phase.
-     */
-    public HadoopJobPhase jobPhase() {
-        return jobPhase;
-    }
-
-    /**
-     * @return Reducer addresses.
-     */
-    public HadoopProcessDescriptor[] reducersAddresses() {
-        return reducersAddrs;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        jobId.writeExternal(out);
-
-        out.writeObject(jobPhase);
-        U.writeArray(out, reducersAddrs);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        jobId = new HadoopJobId();
-        jobId.readExternal(in);
-
-        jobPhase = (HadoopJobPhase)in.readObject();
-        reducersAddrs = (HadoopProcessDescriptor[])U.readArray(in);
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopJobInfoUpdateRequest.class, this);
-    }
-}
\ No newline at end of file
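
Every HadoopMessage in this package follows the same Externalizable contract: write via writeExternal, reconstruct through the public no-arg constructor, then restore via readExternal. A hedged generic round-trip helper illustrating that contract (the helper itself is not part of the Ignite API):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.Externalizable;
    import java.io.IOException;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;

    public class ExternalizableRoundTrip {
        /** Serializes a message the way these requests write themselves. */
        public static byte[] marshal(Externalizable msg) throws IOException {
            ByteArrayOutputStream bos = new ByteArrayOutputStream();

            try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
                msg.writeExternal(out);
            }

            return bos.toByteArray();
        }

        /** Restores state into a freshly constructed (no-arg) instance. */
        public static <T extends Externalizable> T unmarshal(T target, byte[] bytes)
            throws IOException, ClassNotFoundException {
            try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
                target.readExternal(in);
            }

            return target;
        }
    }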

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopPrepareForJobRequest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopPrepareForJobRequest.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopPrepareForJobRequest.java
deleted file mode 100644
index 43bdc36..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopPrepareForJobRequest.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor.external;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
-import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
-import org.apache.ignite.internal.util.tostring.GridToStringInclude;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-/**
- * Child process initialization request.
- */
-public class HadoopPrepareForJobRequest implements HadoopMessage {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** Job ID. */
-    @GridToStringInclude
-    private HadoopJobId jobId;
-
-    /** Job info. */
-    @GridToStringInclude
-    private HadoopJobInfo jobInfo;
-
-    /** Total amount of reducers in the job. */
-    @GridToStringInclude
-    private int totalReducersCnt;
-
-    /** Reducers to be executed on the current node. */
-    @GridToStringInclude
-    private int[] locReducers;
-
-    /**
-     * Constructor required by {@link Externalizable}.
-     */
-    public HadoopPrepareForJobRequest() {
-        // No-op.
-    }
-
-    /**
-     * @param jobId Job ID.
-     * @param jobInfo Job info.
-     * @param totalReducersCnt Number of reducers in the job.
-     * @param locReducers Reducers to be executed on the current node.
-     */
-    public HadoopPrepareForJobRequest(HadoopJobId jobId, HadoopJobInfo jobInfo, int totalReducersCnt,
-        int[] locReducers) {
-        assert jobId != null;
-
-        this.jobId = jobId;
-        this.jobInfo = jobInfo;
-        this.totalReducersCnt = totalReducersCnt;
-        this.locReducers = locReducers;
-    }
-
-    /**
-     * @return Job info.
-     */
-    public HadoopJobInfo jobInfo() {
-        return jobInfo;
-    }
-
-    /**
-     * @return Job ID.
-     */
-    public HadoopJobId jobId() {
-        return jobId;
-    }
-
-    /**
-     * @return Reducers to be executed on the current node.
-     */
-    public int[] localReducers() {
-        return locReducers;
-    }
-
-    /**
-     * @return Number of reducers in the job.
-     */
-    public int totalReducerCount() {
-        return totalReducersCnt;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        jobId.writeExternal(out);
-
-        out.writeObject(jobInfo);
-        out.writeInt(totalReducersCnt);
-
-        U.writeIntArray(out, locReducers);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        jobId = new HadoopJobId();
-        jobId.readExternal(in);
-
-        jobInfo = (HadoopJobInfo)in.readObject();
-        totalReducersCnt = in.readInt();
-
-        locReducers = U.readIntArray(in);
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopPrepareForJobRequest.class, this);
-    }
-}
\ No newline at end of file
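
The two reducer fields above play different roles: keys are partitioned across all totalReducersCnt reducers of the job, while only the reducers listed in locReducers actually run inside this child process. A hedged sketch of that relationship; the modulo hash below is the typical MapReduce-style partitioner, not necessarily Ignite's exact function:

    public class ReducerPartitioning {
        /** Maps a key to one of the job's reducers (illustrative partitioner). */
        static int partitionFor(Object key, int totalReducersCnt) {
            return (key.hashCode() & Integer.MAX_VALUE) % totalReducersCnt;
        }

        /** Checks whether a partition is handled by this child process. */
        static boolean isLocal(int partition, int[] locReducers) {
            for (int r : locReducers) {
                if (r == partition)
                    return true;
            }

            return false;
        }
    }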

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessDescriptor.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessDescriptor.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessDescriptor.java
deleted file mode 100644
index 2dc233b..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessDescriptor.java
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor.external;
-
-import java.io.Serializable;
-import java.util.UUID;
-import org.apache.ignite.internal.util.typedef.internal.S;
-
-/**
- * Process descriptor used to identify the process in which a task is running.
- */
-public class HadoopProcessDescriptor implements Serializable {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** Parent node ID. */
-    private UUID parentNodeId;
-
-    /** Process ID. */
-    private UUID procId;
-
-    /** Address. */
-    private String addr;
-
-    /** TCP port. */
-    private int tcpPort;
-
-    /** Shared memory port. */
-    private int shmemPort;
-
-    /**
-     * @param parentNodeId Parent node ID.
-     * @param procId Process ID.
-     */
-    public HadoopProcessDescriptor(UUID parentNodeId, UUID procId) {
-        this.parentNodeId = parentNodeId;
-        this.procId = procId;
-    }
-
-    /**
-     * Gets process ID.
-     *
-     * @return Process ID.
-     */
-    public UUID processId() {
-        return procId;
-    }
-
-    /**
-     * Gets parent node ID.
-     *
-     * @return Parent node ID.
-     */
-    public UUID parentNodeId() {
-        return parentNodeId;
-    }
-
-    /**
-     * Gets host address.
-     *
-     * @return Host address.
-     */
-    public String address() {
-        return addr;
-    }
-
-    /**
-     * Sets host address.
-     *
-     * @param addr Host address.
-     */
-    public void address(String addr) {
-        this.addr = addr;
-    }
-
-    /**
-     * @return Shared memory port.
-     */
-    public int sharedMemoryPort() {
-        return shmemPort;
-    }
-
-    /**
-     * Sets shared memory port.
-     *
-     * @param shmemPort Shared memory port.
-     */
-    public void sharedMemoryPort(int shmemPort) {
-        this.shmemPort = shmemPort;
-    }
-
-    /**
-     * @return TCP port.
-     */
-    public int tcpPort() {
-        return tcpPort;
-    }
-
-    /**
-     * Sets TCP port.
-     *
-     * @param tcpPort TCP port.
-     */
-    public void tcpPort(int tcpPort) {
-        this.tcpPort = tcpPort;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean equals(Object o) {
-        if (this == o)
-            return true;
-
-        if (!(o instanceof HadoopProcessDescriptor))
-            return false;
-
-        HadoopProcessDescriptor that = (HadoopProcessDescriptor)o;
-
-        return parentNodeId.equals(that.parentNodeId) && procId.equals(that.procId);
-    }
-
-    /** {@inheritDoc} */
-    @Override public int hashCode() {
-        int result = parentNodeId.hashCode();
-
-        result = 31 * result + procId.hashCode();
-
-        return result;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopProcessDescriptor.class, this);
-    }
-}
\ No newline at end of file
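
Note that equals() and hashCode() above cover only the (parentNodeId, procId) pair and deliberately ignore the mutable address and port fields, so a descriptor keeps its identity as a map key even after its endpoint is filled in. A hedged sketch (run with -ea for the assert; the import assumes the pre-move package):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.UUID;

    import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;

    public class DescriptorIdentityExample {
        public static void main(String[] args) {
            UUID node = UUID.randomUUID();
            UUID proc = UUID.randomUUID();

            HadoopProcessDescriptor desc = new HadoopProcessDescriptor(node, proc);

            Map<HadoopProcessDescriptor, String> procs = new HashMap<>();

            procs.put(desc, "child-1");

            // Mutating endpoint fields does not disturb map lookups.
            desc.address("10.0.0.5");
            desc.tcpPort(47100);

            assert "child-1".equals(procs.get(new HadoopProcessDescriptor(node, proc)));
        }
    }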

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessStartedAck.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessStartedAck.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessStartedAck.java
deleted file mode 100644
index b35f3ec..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessStartedAck.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor.external;
-
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
-import org.apache.ignite.internal.util.typedef.internal.S;
-
-/**
- * Process started message.
- */
-public class HadoopProcessStartedAck implements HadoopMessage {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        // No-op.
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        // No-op.
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopProcessStartedAck.class, this);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskExecutionRequest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskExecutionRequest.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskExecutionRequest.java
deleted file mode 100644
index 3875304..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskExecutionRequest.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor.external;
-
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.Collection;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
-import org.apache.ignite.internal.util.tostring.GridToStringInclude;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-/**
- * Message sent from a node to a child process to start execution of one or more tasks.
- */
-public class HadoopTaskExecutionRequest implements HadoopMessage {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** Job ID. */
-    @GridToStringInclude
-    private HadoopJobId jobId;
-
-    /** Job info. */
-    @GridToStringInclude
-    private HadoopJobInfo jobInfo;
-
-    /** Tasks to execute. */
-    @GridToStringInclude
-    private Collection<HadoopTaskInfo> tasks;
-
-    /**
-     * @return Job ID.
-     */
-    public HadoopJobId jobId() {
-        return jobId;
-    }
-
-    /**
-     * @param jobId Job ID.
-     */
-    public void jobId(HadoopJobId jobId) {
-        this.jobId = jobId;
-    }
-
-    /**
-     * @return Job info.
-     */
-    public HadoopJobInfo jobInfo() {
-        return jobInfo;
-    }
-
-    /**
-     * @param jobInfo Job info.
-     */
-    public void jobInfo(HadoopJobInfo jobInfo) {
-        this.jobInfo = jobInfo;
-    }
-
-    /**
-     * @return Tasks.
-     */
-    public Collection<HadoopTaskInfo> tasks() {
-        return tasks;
-    }
-
-    /**
-     * @param tasks Tasks.
-     */
-    public void tasks(Collection<HadoopTaskInfo> tasks) {
-        this.tasks = tasks;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopTaskExecutionRequest.class, this);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        jobId.writeExternal(out);
-
-        out.writeObject(jobInfo);
-        U.writeCollection(out, tasks);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        jobId = new HadoopJobId();
-        jobId.readExternal(in);
-
-        jobInfo = (HadoopJobInfo)in.readObject();
-        tasks = U.readCollection(in);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskFinishedMessage.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskFinishedMessage.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskFinishedMessage.java
deleted file mode 100644
index 9e1fdb3..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskFinishedMessage.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor.external;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskStatus;
-import org.apache.ignite.internal.util.typedef.internal.S;
-
-/**
- * Task finished message. Sent when a local task finishes execution.
- */
-public class HadoopTaskFinishedMessage implements HadoopMessage {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** Finished task info. */
-    private HadoopTaskInfo taskInfo;
-
-    /** Task finish status. */
-    private HadoopTaskStatus status;
-
-    /**
-     * Constructor required by {@link Externalizable}.
-     */
-    public HadoopTaskFinishedMessage() {
-        // No-op.
-    }
-
-    /**
-     * @param taskInfo Finished task info.
-     * @param status Task finish status.
-     */
-    public HadoopTaskFinishedMessage(HadoopTaskInfo taskInfo, HadoopTaskStatus status) {
-        assert taskInfo != null;
-        assert status != null;
-
-        this.taskInfo = taskInfo;
-        this.status = status;
-    }
-
-    /**
-     * @return Finished task info.
-     */
-    public HadoopTaskInfo taskInfo() {
-        return taskInfo;
-    }
-
-    /**
-     * @return Task finish status.
-     */
-    public HadoopTaskStatus status() {
-        return status;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopTaskFinishedMessage.class, this);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        taskInfo.writeExternal(out);
-        status.writeExternal(out);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        taskInfo = new HadoopTaskInfo();
-        taskInfo.readExternal(in);
-
-        status = new HadoopTaskStatus();
-        status.readExternal(in);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopChildProcessRunner.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopChildProcessRunner.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopChildProcessRunner.java
deleted file mode 100644
index 4a946e9..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopChildProcessRunner.java
+++ /dev/null
@@ -1,459 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.child;
-
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteLogger;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.processors.hadoop.HadoopJob;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskOutput;
-import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
-import org.apache.ignite.internal.processors.hadoop.shuffle.HadoopShuffleAck;
-import org.apache.ignite.internal.processors.hadoop.shuffle.HadoopShuffleJob;
-import org.apache.ignite.internal.processors.hadoop.shuffle.HadoopShuffleMessage;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopExecutorService;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopRunnableTask;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskState;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskStatus;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopJobInfoUpdateRequest;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopPrepareForJobRequest;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessStartedAck;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopTaskExecutionRequest;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopTaskFinishedMessage;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication.HadoopExternalCommunication;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication.HadoopMessageListener;
-import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2Job;
-import org.apache.ignite.internal.util.future.GridFutureAdapter;
-import org.apache.ignite.internal.util.lang.IgniteInClosure2X;
-import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
-import org.apache.ignite.internal.util.typedef.CI1;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.MAP;
-import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.REDUCE;
-
-/**
- * Child process runner: executes Hadoop tasks inside an external process on behalf of the parent node.
- */
-@SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
-public class HadoopChildProcessRunner {
-    /** Node process descriptor. */
-    private HadoopProcessDescriptor nodeDesc;
-
-    /** Message processing executor service. */
-    private ExecutorService msgExecSvc;
-
-    /** Task executor service. */
-    private HadoopExecutorService execSvc;
-
-    /** */
-    protected GridUnsafeMemory mem = new GridUnsafeMemory(0);
-
-    /** External communication. */
-    private HadoopExternalCommunication comm;
-
-    /** Logger. */
-    private IgniteLogger log;
-
-    /** Init guard. */
-    private final AtomicBoolean initGuard = new AtomicBoolean();
-
-    /** Start time. */
-    private long startTime;
-
-    /** Init future. */
-    private final GridFutureAdapter<?> initFut = new GridFutureAdapter<>();
-
-    /** Job instance. */
-    private HadoopJob job;
-
-    /** Number of uncompleted tasks. */
-    private final AtomicInteger pendingTasks = new AtomicInteger();
-
-    /** Shuffle job. */
-    private HadoopShuffleJob<HadoopProcessDescriptor> shuffleJob;
-
-    /** Concurrent mappers. */
-    private int concMappers;
-
-    /** Concurrent reducers. */
-    private int concReducers;
-
-    /**
-     * Starts child process runner.
-     *
-     * @param comm External communication.
-     * @param nodeDesc Parent node process descriptor.
-     * @param msgExecSvc Message processing executor service.
-     * @param parentLog Parent logger.
-     * @throws IgniteCheckedException If failed.
-     */
-    public void start(HadoopExternalCommunication comm, HadoopProcessDescriptor nodeDesc,
-        ExecutorService msgExecSvc, IgniteLogger parentLog)
-        throws IgniteCheckedException {
-        this.comm = comm;
-        this.nodeDesc = nodeDesc;
-        this.msgExecSvc = msgExecSvc;
-
-        comm.setListener(new MessageListener());
-        log = parentLog.getLogger(HadoopChildProcessRunner.class);
-
-        startTime = U.currentTimeMillis();
-
-        // At this point the parent node learns that this process has started.
-        comm.sendMessage(this.nodeDesc, new HadoopProcessStartedAck());
-    }
-
-    /**
-     * Initializes process for task execution.
-     *
-     * @param req Initialization request.
-     */
-    private void prepareProcess(HadoopPrepareForJobRequest req) {
-        if (initGuard.compareAndSet(false, true)) {
-            try {
-                if (log.isDebugEnabled())
-                    log.debug("Initializing external hadoop task: " + req);
-
-                assert job == null;
-
-                job = req.jobInfo().createJob(HadoopV2Job.class, req.jobId(), log, null);
-
-                job.initialize(true, nodeDesc.processId());
-
-                shuffleJob = new HadoopShuffleJob<>(comm.localProcessDescriptor(), log, job, mem,
-                    req.totalReducerCount(), req.localReducers());
-
-                initializeExecutors(req);
-
-                if (log.isDebugEnabled())
-                    log.debug("External process initialized [initWaitTime=" +
-                        (U.currentTimeMillis() - startTime) + ']');
-
-                initFut.onDone();
-            }
-            catch (IgniteCheckedException e) {
-                U.error(log, "Failed to initialize process: " + req, e);
-
-                initFut.onDone(e);
-            }
-        }
-        else
-            log.warning("Duplicate initialize process request received (will ignore): " + req);
-    }
-
-    /**
-     * @param req Task execution request.
-     */
-    private void runTasks(final HadoopTaskExecutionRequest req) {
-        if (!initFut.isDone() && log.isDebugEnabled())
-            log.debug("Will wait for process initialization future completion: " + req);
-
-        initFut.listen(new CI1<IgniteInternalFuture<?>>() {
-            @Override public void apply(IgniteInternalFuture<?> f) {
-                try {
-                    // Make sure init was successful.
-                    f.get();
-
-                    boolean set = pendingTasks.compareAndSet(0, req.tasks().size());
-
-                    assert set;
-
-                    HadoopTaskInfo info = F.first(req.tasks());
-
-                    assert info != null;
-
-                    int size = info.type() == MAP ? concMappers : concReducers;
-
-//                    execSvc.setCorePoolSize(size);
-//                    execSvc.setMaximumPoolSize(size);
-
-                    if (log.isDebugEnabled())
-                        log.debug("Set executor service size for task type [type=" + info.type() +
-                            ", size=" + size + ']');
-
-                    for (HadoopTaskInfo taskInfo : req.tasks()) {
-                        if (log.isDebugEnabled())
-                            log.debug("Submitted task for external execution: " + taskInfo);
-
-                        execSvc.submit(new HadoopRunnableTask(log, job, mem, taskInfo, nodeDesc.parentNodeId()) {
-                            @Override protected void onTaskFinished(HadoopTaskStatus status) {
-                                onTaskFinished0(this, status);
-                            }
-
-                            @Override protected HadoopTaskInput createInput(HadoopTaskContext ctx)
-                                throws IgniteCheckedException {
-                                return shuffleJob.input(ctx);
-                            }
-
-                            @Override protected HadoopTaskOutput createOutput(HadoopTaskContext ctx)
-                                throws IgniteCheckedException {
-                                return shuffleJob.output(ctx);
-                            }
-                        });
-                    }
-                }
-                catch (IgniteCheckedException e) {
-                    for (HadoopTaskInfo info : req.tasks())
-                        notifyTaskFinished(info, new HadoopTaskStatus(HadoopTaskState.FAILED, e), false);
-                }
-            }
-        });
-    }
-
-    /**
-     * Creates executor services.
-     *
-     * @param req Init child process request.
-     */
-    private void initializeExecutors(HadoopPrepareForJobRequest req) {
-        int cpus = Runtime.getRuntime().availableProcessors();
-//
-//        concMappers = get(req.jobInfo(), EXTERNAL_CONCURRENT_MAPPERS, cpus);
-//        concReducers = get(req.jobInfo(), EXTERNAL_CONCURRENT_REDUCERS, cpus);
-
-        execSvc = new HadoopExecutorService(log, "", cpus * 2, 1024);
-    }
-
-    /**
- * Updates the external process map so that the shuffle can start sending messages to reducers.
-     *
-     * @param req Update request.
-     */
-    private void updateTasks(final HadoopJobInfoUpdateRequest req) {
-        initFut.listen(new CI1<IgniteInternalFuture<?>>() {
-            @Override public void apply(IgniteInternalFuture<?> gridFut) {
-                assert initGuard.get();
-
-                assert req.jobId().equals(job.id());
-
-                if (req.reducersAddresses() != null) {
-                    if (shuffleJob.initializeReduceAddresses(req.reducersAddresses())) {
-                        shuffleJob.startSending("external",
-                            new IgniteInClosure2X<HadoopProcessDescriptor, HadoopShuffleMessage>() {
-                                @Override public void applyx(HadoopProcessDescriptor dest,
-                                    HadoopShuffleMessage msg) throws IgniteCheckedException {
-                                    comm.sendMessage(dest, msg);
-                                }
-                            });
-                    }
-                }
-            }
-        });
-    }
-
-    /**
-     * Stops all executors and running tasks.
-     */
-    private void shutdown() {
-        if (execSvc != null)
-            execSvc.shutdown(5000);
-
-        if (msgExecSvc != null)
-            msgExecSvc.shutdownNow();
-
-        try {
-            job.dispose(true);
-        }
-        catch (IgniteCheckedException e) {
-            U.error(log, "Failed to dispose job.", e);
-        }
-    }
-
-    /**
-     * Notifies node about task finish.
-     *
-     * @param run Finished task runnable.
-     * @param status Task status.
-     */
-    private void onTaskFinished0(HadoopRunnableTask run, HadoopTaskStatus status) {
-        HadoopTaskInfo info = run.taskInfo();
-
-        int pendingTasks0 = pendingTasks.decrementAndGet();
-
-        if (log.isDebugEnabled())
-            log.debug("Hadoop task execution finished [info=" + info
-                + ", state=" + status.state() + ", waitTime=" + run.waitTime() + ", execTime=" + run.executionTime() +
-                ", pendingTasks=" + pendingTasks0 +
-                ", err=" + status.failCause() + ']');
-
-        assert info.type() == MAP || info.type() == REDUCE : "Only MAP or REDUCE tasks are supported.";
-
-        boolean flush = pendingTasks0 == 0 && info.type() == MAP;
-
-        notifyTaskFinished(info, status, flush);
-    }
-
-    /**
-     * @param taskInfo Finished task info.
-     * @param status Task status.
-     */
-    private void notifyTaskFinished(final HadoopTaskInfo taskInfo, final HadoopTaskStatus status,
-        boolean flush) {
-
-        final HadoopTaskState state = status.state();
-        final Throwable err = status.failCause();
-
-        if (!flush) {
-            try {
-                if (log.isDebugEnabled())
-                    log.debug("Sending notification to parent node [taskInfo=" + taskInfo + ", state=" + state +
-                        ", err=" + err + ']');
-
-                comm.sendMessage(nodeDesc, new HadoopTaskFinishedMessage(taskInfo, status));
-            }
-            catch (IgniteCheckedException e) {
-                log.error("Failed to send message to parent node (will terminate child process).", e);
-
-                shutdown();
-
-                terminate();
-            }
-        }
-        else {
-            if (log.isDebugEnabled())
-                log.debug("Flushing shuffle messages before sending last task completion notification [taskInfo=" +
-                    taskInfo + ", state=" + state + ", err=" + err + ']');
-
-            final long start = U.currentTimeMillis();
-
-            try {
-                shuffleJob.flush().listen(new CI1<IgniteInternalFuture<?>>() {
-                    @Override public void apply(IgniteInternalFuture<?> f) {
-                        long end = U.currentTimeMillis();
-
-                        if (log.isDebugEnabled())
-                            log.debug("Finished flushing shuffle messages [taskInfo=" + taskInfo +
-                                ", flushTime=" + (end - start) + ']');
-
-                        try {
-                            // Check for errors on shuffle.
-                            f.get();
-
-                            notifyTaskFinished(taskInfo, status, false);
-                        }
-                        catch (IgniteCheckedException e) {
-                            log.error("Failed to flush shuffle messages (will fail the task) [taskInfo=" + taskInfo +
-                                ", state=" + state + ", err=" + err + ']', e);
-
-                            notifyTaskFinished(taskInfo,
-                                new HadoopTaskStatus(HadoopTaskState.FAILED, e), false);
-                        }
-                    }
-                });
-            }
-            catch (IgniteCheckedException e) {
-                log.error("Failed to flush shuffle messages (will fail the task) [taskInfo=" + taskInfo +
-                    ", state=" + state + ", err=" + err + ']', e);
-
-                notifyTaskFinished(taskInfo, new HadoopTaskStatus(HadoopTaskState.FAILED, e), false);
-            }
-        }
-    }
-
-    /**
-     * Checks whether the message was received from the parent node and logs a warning if not.
-     *
-     * @param desc Sender process descriptor.
-     * @param msg Received message.
-     * @return {@code True} if received from parent node.
-     */
-    private boolean validateNodeMessage(HadoopProcessDescriptor desc, HadoopMessage msg) {
-        if (!nodeDesc.processId().equals(desc.processId())) {
-            log.warning("Received process control request from unknown process (will ignore) [desc=" + desc +
-                ", msg=" + msg + ']');
-
-            return false;
-        }
-
-        return true;
-    }
-
-    /**
-     * Stops execution of this process.
-     */
-    private void terminate() {
-        System.exit(1);
-    }
-
-    /**
-     * Message listener.
-     */
-    private class MessageListener implements HadoopMessageListener {
-        /** {@inheritDoc} */
-        @Override public void onMessageReceived(final HadoopProcessDescriptor desc, final HadoopMessage msg) {
-            if (msg instanceof HadoopTaskExecutionRequest) {
-                if (validateNodeMessage(desc, msg))
-                    runTasks((HadoopTaskExecutionRequest)msg);
-            }
-            else if (msg instanceof HadoopJobInfoUpdateRequest) {
-                if (validateNodeMessage(desc, msg))
-                    updateTasks((HadoopJobInfoUpdateRequest)msg);
-            }
-            else if (msg instanceof HadoopPrepareForJobRequest) {
-                if (validateNodeMessage(desc, msg))
-                    prepareProcess((HadoopPrepareForJobRequest)msg);
-            }
-            else if (msg instanceof HadoopShuffleMessage) {
-                if (log.isTraceEnabled())
-                    log.trace("Received shuffle message [desc=" + desc + ", msg=" + msg + ']');
-
-                initFut.listen(new CI1<IgniteInternalFuture<?>>() {
-                    @Override public void apply(IgniteInternalFuture<?> f) {
-                        try {
-                            HadoopShuffleMessage m = (HadoopShuffleMessage)msg;
-
-                            shuffleJob.onShuffleMessage(m);
-
-                            comm.sendMessage(desc, new HadoopShuffleAck(m.id(), m.jobId()));
-                        }
-                        catch (IgniteCheckedException e) {
-                            U.error(log, "Failed to process hadoop shuffle message [desc=" + desc + ", msg=" + msg + ']', e);
-                        }
-                    }
-                });
-            }
-            else if (msg instanceof HadoopShuffleAck) {
-                if (log.isTraceEnabled())
-                    log.trace("Received shuffle ack [desc=" + desc + ", msg=" + msg + ']');
-
-                shuffleJob.onShuffleAck((HadoopShuffleAck)msg);
-            }
-            else
-                log.warning("Unknown message received (will ignore) [desc=" + desc + ", msg=" + msg + ']');
-        }
-
-        /** {@inheritDoc} */
-        @Override public void onConnectionLost(HadoopProcessDescriptor desc) {
-            if (log.isDebugEnabled())
-                log.debug("Lost connection with remote process: " + desc);
-
-            if (desc == null)
-                U.warn(log, "Handshake failed.");
-            else if (desc.processId().equals(nodeDesc.processId())) {
-                log.warning("Child process lost connection with parent node (will terminate child process).");
-
-                shutdown();
-
-                terminate();
-            }
-        }
-    }
-}
\ No newline at end of file
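
One detail of onTaskFinished0 above worth spelling out: shuffle output is flushed exactly once per child process, when the pending-task counter drops to zero and the finishing task is a mapper. A hedged, self-contained sketch of that decision (names illustrative):

    import java.util.concurrent.atomic.AtomicInteger;

    public class FlushDecision {
        enum TaskType { MAP, REDUCE }

        private final AtomicInteger pendingTasks = new AtomicInteger();

        void onTasksSubmitted(int cnt) {
            pendingTasks.set(cnt);
        }

        /** @return {@code true} if shuffle output should be flushed now. */
        boolean onTaskFinished(TaskType type) {
            int left = pendingTasks.decrementAndGet();

            // Flush exactly once: after the last pending task, and only for mappers.
            return left == 0 && type == TaskType.MAP;
        }
    }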


[19/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounters.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounters.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounters.java
deleted file mode 100644
index 57a853f..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounters.java
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import org.apache.hadoop.mapreduce.Counter;
-import org.apache.hadoop.mapreduce.CounterGroup;
-import org.apache.hadoop.mapreduce.Counters;
-import org.apache.hadoop.mapreduce.FileSystemCounter;
-import org.apache.hadoop.mapreduce.counters.AbstractCounters;
-import org.apache.hadoop.mapreduce.counters.Limits;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounter;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopLongCounter;
-import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2Counter;
-import org.apache.ignite.internal.util.typedef.T2;
-
-/**
- * Hadoop counters adapter.
- */
-public class HadoopMapReduceCounters extends Counters {
-    /** */
-    private final Map<T2<String,String>,HadoopLongCounter> cntrs = new HashMap<>();
-
-    /**
-     * Creates new instance based on given counters.
-     *
-     * @param cntrs Counters to adapt.
-     */
-    public HadoopMapReduceCounters(org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters cntrs) {
-        for (HadoopCounter cntr : cntrs.all())
-            if (cntr instanceof HadoopLongCounter)
-                this.cntrs.put(new T2<>(cntr.group(), cntr.name()), (HadoopLongCounter) cntr);
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized CounterGroup addGroup(CounterGroup grp) {
-        return addGroup(grp.getName(), grp.getDisplayName());
-    }
-
-    /** {@inheritDoc} */
-    @Override public CounterGroup addGroup(String name, String displayName) {
-        return new HadoopMapReduceCounterGroup(this, name);
-    }
-
-    /** {@inheritDoc} */
-    @Override public Counter findCounter(String grpName, String cntrName) {
-        return findCounter(grpName, cntrName, true);
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized Counter findCounter(Enum<?> key) {
-        return findCounter(key.getDeclaringClass().getName(), key.name(), true);
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized Counter findCounter(String scheme, FileSystemCounter key) {
-        return findCounter(String.format("FileSystem Counter (%s)", scheme), key.name());
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized Iterable<String> getGroupNames() {
-        Collection<String> res = new HashSet<>();
-
-        for (HadoopCounter counter : cntrs.values())
-            res.add(counter.group());
-
-        return res;
-    }
-
-    /** {@inheritDoc} */
-    @Override public Iterator<CounterGroup> iterator() {
-        final Iterator<String> iter = getGroupNames().iterator();
-
-        return new Iterator<CounterGroup>() {
-            @Override public boolean hasNext() {
-                return iter.hasNext();
-            }
-
-            @Override public CounterGroup next() {
-                if (!hasNext())
-                    throw new NoSuchElementException();
-
-                return new HadoopMapReduceCounterGroup(HadoopMapReduceCounters.this, iter.next());
-            }
-
-            @Override public void remove() {
-                throw new UnsupportedOperationException("not implemented");
-            }
-        };
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized CounterGroup getGroup(String grpName) {
-        return new HadoopMapReduceCounterGroup(this, grpName);
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized int countCounters() {
-        return cntrs.size();
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized void write(DataOutput out) throws IOException {
-        throw new UnsupportedOperationException("not implemented");
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized void readFields(DataInput in) throws IOException {
-        throw new UnsupportedOperationException("not implemented");
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized void incrAllCounters(AbstractCounters<Counter, CounterGroup> other) {
-        for (CounterGroup group : other) {
-            for (Counter counter : group) {
-                findCounter(group.getName(), counter.getName()).increment(counter.getValue());
-            }
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean equals(Object genericRight) {
-        if (!(genericRight instanceof HadoopMapReduceCounters))
-            return false;
-
-        return cntrs.equals(((HadoopMapReduceCounters) genericRight).cntrs);
-    }
-
-    /** {@inheritDoc} */
-    @Override public int hashCode() {
-        return cntrs.hashCode();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void setWriteAllCounters(boolean snd) {
-        // No-op.
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean getWriteAllCounters() {
-        return true;
-    }
-
-    /** {@inheritDoc} */
-    @Override public Limits limits() {
-        return null;
-    }
-
-    /**
-     * Returns the size of a group.
-     *
-     * @param grpName Name of the group.
-     * @return Number of counters in the given group.
-     */
-    public int groupSize(String grpName) {
-        int res = 0;
-
-        for (HadoopCounter counter : cntrs.values()) {
-            if (grpName.equals(counter.group()))
-                res++;
-        }
-
-        return res;
-    }
-
-    /**
-     * Returns counters iterator for specified group.
-     *
-     * @param grpName Name of the group to iterate.
-     * @return Counters iterator.
-     */
-    public Iterator<Counter> iterateGroup(String grpName) {
-        Collection<Counter> grpCounters = new ArrayList<>();
-
-        for (HadoopLongCounter counter : cntrs.values()) {
-            if (grpName.equals(counter.group()))
-                grpCounters.add(new HadoopV2Counter(counter));
-        }
-
-        return grpCounters.iterator();
-    }
-
-    /**
-     * Find a counter in the group.
-     *
-     * @param grpName The name of the counter group.
-     * @param cntrName The name of the counter.
-     * @param create If {@code true}, create the counter when it is not found.
-     * @return The counter that was found or created, or {@code null} if it was not found and {@code create} is {@code false}.
-     */
-    public Counter findCounter(String grpName, String cntrName, boolean create) {
-        T2<String, String> key = new T2<>(grpName, cntrName);
-
-        HadoopLongCounter internalCntr = cntrs.get(key);
-
-        if (internalCntr == null && create) {
-            internalCntr = new HadoopLongCounter(grpName, cntrName);
-
-            cntrs.put(key, internalCntr);
-        }
-
-        return internalCntr == null ? null : new HadoopV2Counter(internalCntr);
-    }
-}
\ No newline at end of file

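The facade above bridges Ignite's internal HadoopLongCounter storage to the standard Hadoop counters API. Below is a minimal usage sketch, not part of this commit, assuming a completed org.apache.hadoop.mapreduce.Job handle; it relies only on public Hadoop APIs:

    import org.apache.hadoop.mapreduce.Counter;
    import org.apache.hadoop.mapreduce.CounterGroup;
    import org.apache.hadoop.mapreduce.Counters;
    import org.apache.hadoop.mapreduce.Job;

    public class CountersUsageSketch {
        /** Prints all counter groups and values of a finished job. */
        public static void printCounters(Job job) throws Exception {
            Counters counters = job.getCounters();

            for (CounterGroup grp : counters) {
                System.out.println("Group: " + grp.getName());

                for (Counter cntr : grp)
                    System.out.println("  " + cntr.getName() + " = " + cntr.getValue());
            }

            // findCounter(grp, name) creates a missing counter on first access,
            // matching the create == true path implemented above.
            counters.findCounter("my-group", "my-counter").increment(1);
        }
    }

Note that write() and readFields() deliberately throw UnsupportedOperationException in the implementation above, so the facade is intended for in-process inspection rather than Hadoop wire serialization.
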
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopProcessor.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopProcessor.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopProcessor.java
deleted file mode 100644
index b9c20c3..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopProcessor.java
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.configuration.HadoopConfiguration;
-import org.apache.ignite.hadoop.mapreduce.IgniteHadoopMapReducePlanner;
-import org.apache.ignite.internal.GridKernalContext;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
-import org.apache.ignite.internal.processors.hadoop.jobtracker.HadoopJobTracker;
-import org.apache.ignite.internal.processors.hadoop.shuffle.HadoopShuffle;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopEmbeddedTaskExecutor;
-import org.apache.ignite.internal.util.tostring.GridToStringExclude;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.ListIterator;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * Hadoop processor.
- */
-public class HadoopProcessor extends HadoopProcessorAdapter {
-    /** Job ID counter. */
-    private final AtomicInteger idCtr = new AtomicInteger();
-
-    /** Hadoop context. */
-    @GridToStringExclude
-    private HadoopContext hctx;
-
-    /** Hadoop facade for public API. */
-    @GridToStringExclude
-    private Hadoop hadoop;
-
-    /**
-     * Constructor.
-     *
-     * @param ctx Kernal context.
-     */
-    public HadoopProcessor(GridKernalContext ctx) {
-        super(ctx);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void start() throws IgniteCheckedException {
-        if (ctx.isDaemon())
-            return;
-
-        HadoopConfiguration cfg = ctx.config().getHadoopConfiguration();
-
-        if (cfg == null)
-            cfg = new HadoopConfiguration();
-        else
-            cfg = new HadoopConfiguration(cfg);
-
-        initializeDefaults(cfg);
-
-        hctx = new HadoopContext(
-            ctx,
-            cfg,
-            new HadoopJobTracker(),
-            new HadoopEmbeddedTaskExecutor(),
-            // TODO: IGNITE-404: Uncomment when fixed.
-            //cfg.isExternalExecution() ? new HadoopExternalTaskExecutor() : new HadoopEmbeddedTaskExecutor(),
-            new HadoopShuffle());
-
-        for (HadoopComponent c : hctx.components())
-            c.start(hctx);
-
-        hadoop = new HadoopImpl(this);
-
-        ctx.addNodeAttribute(HadoopAttributes.NAME, new HadoopAttributes(cfg));
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onKernalStart() throws IgniteCheckedException {
-        super.onKernalStart();
-
-        if (hctx == null)
-            return;
-
-        for (HadoopComponent c : hctx.components())
-            c.onKernalStart();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onKernalStop(boolean cancel) {
-        super.onKernalStop(cancel);
-
-        if (hctx == null)
-            return;
-
-        List<HadoopComponent> components = hctx.components();
-
-        for (ListIterator<HadoopComponent> it = components.listIterator(components.size()); it.hasPrevious();) {
-            HadoopComponent c = it.previous();
-
-            c.onKernalStop(cancel);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void stop(boolean cancel) throws IgniteCheckedException {
-        super.stop(cancel);
-
-        if (hctx == null)
-            return;
-
-        List<HadoopComponent> components = hctx.components();
-
-        for (ListIterator<HadoopComponent> it = components.listIterator(components.size()); it.hasPrevious();) {
-            HadoopComponent c = it.previous();
-
-            c.stop(cancel);
-        }
-    }
-
-    /**
-     * Gets Hadoop context.
-     *
-     * @return Hadoop context.
-     */
-    public HadoopContext context() {
-        return hctx;
-    }
-
-    /** {@inheritDoc} */
-    @Override public Hadoop hadoop() {
-        if (hadoop == null)
-            throw new IllegalStateException("Hadoop accelerator is disabled (Hadoop is not in classpath, " +
-                "is HADOOP_HOME environment variable set?)");
-
-        return hadoop;
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopConfiguration config() {
-        return hctx.configuration();
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopJobId nextJobId() {
-        return new HadoopJobId(ctx.localNodeId(), idCtr.incrementAndGet());
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgniteInternalFuture<?> submit(HadoopJobId jobId, HadoopJobInfo jobInfo) {
-        return hctx.jobTracker().submit(jobId, jobInfo);
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopJobStatus status(HadoopJobId jobId) throws IgniteCheckedException {
-        return hctx.jobTracker().status(jobId);
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopCounters counters(HadoopJobId jobId) throws IgniteCheckedException {
-        return hctx.jobTracker().jobCounters(jobId);
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgniteInternalFuture<?> finishFuture(HadoopJobId jobId) throws IgniteCheckedException {
-        return hctx.jobTracker().finishFuture(jobId);
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean kill(HadoopJobId jobId) throws IgniteCheckedException {
-        return hctx.jobTracker().killJob(jobId);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void validateEnvironment() throws IgniteCheckedException {
-        // Perform some static checks as early as possible, so that any recoverable exceptions are thrown here.
-        try {
-            HadoopLocations loc = HadoopClasspathUtils.locations();
-
-            if (!F.isEmpty(loc.home()))
-                U.quietAndInfo(log, HadoopClasspathUtils.HOME + " is set to " + loc.home());
-
-            U.quietAndInfo(log, "Resolved Hadoop classpath locations: " + loc.common() + ", " + loc.hdfs() + ", " +
-                loc.mapred());
-        }
-        catch (IOException ioe) {
-            throw new IgniteCheckedException(ioe.getMessage(), ioe);
-        }
-
-        HadoopClassLoader.hadoopUrls();
-    }
-
-    /**
-     * Initializes default hadoop configuration.
-     *
-     * @param cfg Hadoop configuration.
-     */
-    private void initializeDefaults(HadoopConfiguration cfg) {
-        if (cfg.getMapReducePlanner() == null)
-            cfg.setMapReducePlanner(new IgniteHadoopMapReducePlanner());
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopProcessor.class, this);
-    }
-}
\ No newline at end of file

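Before the next file, a brief sketch of how this processor is driven from user code: the HadoopConfiguration read in start() is attached to the node's IgniteConfiguration, and initializeDefaults() supplies IgniteHadoopMapReducePlanner when no planner is set. The class name and settings below are illustrative, not part of the commit:

    import org.apache.ignite.Ignite;
    import org.apache.ignite.Ignition;
    import org.apache.ignite.configuration.HadoopConfiguration;
    import org.apache.ignite.configuration.IgniteConfiguration;
    import org.apache.ignite.hadoop.mapreduce.IgniteHadoopMapReducePlanner;

    public class HadoopNodeStartSketch {
        public static void main(String[] args) {
            HadoopConfiguration hadoopCfg = new HadoopConfiguration();

            // Optional: initializeDefaults() would set this planner anyway.
            hadoopCfg.setMapReducePlanner(new IgniteHadoopMapReducePlanner());

            IgniteConfiguration cfg = new IgniteConfiguration();

            cfg.setHadoopConfiguration(hadoopCfg);

            try (Ignite ignite = Ignition.start(cfg)) {
                System.out.println("Node with Hadoop accelerator started: " + ignite.name());
            }
        }
    }
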
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopSetup.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopSetup.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopSetup.java
deleted file mode 100644
index 2e75e5f..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopSetup.java
+++ /dev/null
@@ -1,541 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileWriter;
-import java.io.FilenameFilter;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.net.URL;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Date;
-import java.util.Scanner;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.X;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-import static org.apache.ignite.internal.IgniteVersionUtils.ACK_VER_STR;
-import static org.apache.ignite.internal.IgniteVersionUtils.COPYRIGHT;
-
-/**
- * Setup tool to configure Hadoop client.
- */
-public class HadoopSetup {
-    /** */
-    public static final String WINUTILS_EXE = "winutils.exe";
-
-    /** */
-    private static final FilenameFilter IGNITE_JARS = new FilenameFilter() {
-        @Override public boolean accept(File dir, String name) {
-            return name.startsWith("ignite-") && name.endsWith(".jar");
-        }
-    };
-
-    /**
-     * The main method.
-     * @param ignore Params.
-     */
-    public static void main(String[] ignore) {
-        X.println(
-            "   __________  ________________ ",
-            "  /  _/ ___/ |/ /  _/_  __/ __/ ",
-            " _/ // (7 7    // /  / / / _/   ",
-            "/___/\\___/_/|_/___/ /_/ /___/  ",
-            "                for Apache Hadoop        ",
-            " ",
-            "ver. " + ACK_VER_STR,
-            COPYRIGHT);
-
-        configureHadoop();
-    }
-
-    /**
-     * This operation prepares a clean unpacked Hadoop distribution to work as an Ignite-Hadoop client.
-     * It performs these operations:
-     * <ul>
-     *     <li>Check that the HADOOP_HOME environment variable is set.</li>
-     *     <li>Try to resolve HADOOP_COMMON_HOME or evaluate it relative to HADOOP_HOME.</li>
-     *     <li>On Windows, check that winutils.exe exists and try to work around known restrictions.</li>
-     *     <li>On Windows, check CMD scripts for new line character issues.</li>
-     *     <li>Scan the Hadoop lib directory for Ignite JARs; if they are missing, offer to create symbolic links.</li>
-     * </ul>
-     */
-    private static void configureHadoop() {
-        String igniteHome = U.getIgniteHome();
-
-        println("IGNITE_HOME is set to '" + igniteHome + "'.");
-
-        checkIgniteHome(igniteHome);
-
-        String homeVar = "HADOOP_HOME";
-        String hadoopHome = System.getenv(homeVar);
-
-        if (F.isEmpty(hadoopHome)) {
-            homeVar = "HADOOP_PREFIX";
-            hadoopHome = System.getenv(homeVar);
-        }
-
-        if (F.isEmpty(hadoopHome))
-            exit("Neither HADOOP_HOME nor HADOOP_PREFIX environment variable is set. Please set one of them to a " +
-                "valid Hadoop installation directory and run setup tool again.", null);
-
-        hadoopHome = hadoopHome.replaceAll("\"", "");
-
-        println(homeVar + " is set to '" + hadoopHome + "'.");
-
-        String hiveHome = System.getenv("HIVE_HOME");
-
-        if (!F.isEmpty(hiveHome)) {
-            hiveHome = hiveHome.replaceAll("\"", "");
-
-            println("HIVE_HOME is set to '" + hiveHome + "'.");
-        }
-
-        File hadoopDir = new File(hadoopHome);
-
-        if (!hadoopDir.exists())
-            exit("Hadoop installation folder does not exist.", null);
-
-        if (!hadoopDir.isDirectory())
-            exit("HADOOP_HOME must point to a directory.", null);
-
-        if (!hadoopDir.canRead())
-            exit("Hadoop installation folder can not be read. Please check permissions.", null);
-
-        final File hadoopCommonDir;
-
-        String hadoopCommonHome = System.getenv("HADOOP_COMMON_HOME");
-
-        if (F.isEmpty(hadoopCommonHome)) {
-            hadoopCommonDir = new File(hadoopDir, "share/hadoop/common");
-
-            println("HADOOP_COMMON_HOME is not set, will use '" + hadoopCommonDir.getPath() + "'.");
-        }
-        else {
-            println("HADOOP_COMMON_HOME is set to '" + hadoopCommonHome + "'.");
-
-            hadoopCommonDir = new File(hadoopCommonHome);
-        }
-
-        if (!hadoopCommonDir.canRead())
-            exit("Failed to read Hadoop common dir '" + hadoopCommonDir + "'.", null);
-
-        final File hadoopCommonLibDir = new File(hadoopCommonDir, "lib");
-
-        if (!hadoopCommonLibDir.canRead())
-            exit("Failed to read Hadoop 'lib' folder in '" + hadoopCommonLibDir.getPath() + "'.", null);
-
-        if (U.isWindows()) {
-            checkJavaPathSpaces();
-
-            final File hadoopBinDir = new File(hadoopDir, "bin");
-
-            if (!hadoopBinDir.canRead())
-                exit("Failed to read subdirectory 'bin' in HADOOP_HOME.", null);
-
-            File winutilsFile = new File(hadoopBinDir, WINUTILS_EXE);
-
-            if (!winutilsFile.exists()) {
-                if (ask("File '" + WINUTILS_EXE + "' does not exist. " +
-                    "It may be replaced by a stub. Create it?")) {
-                    println("Creating file stub '" + winutilsFile.getAbsolutePath() + "'.");
-
-                    boolean ok = false;
-
-                    try {
-                        ok = winutilsFile.createNewFile();
-                    }
-                    catch (IOException ignore) {
-                        // No-op.
-                    }
-
-                    if (!ok)
-                        exit("Failed to create '" + WINUTILS_EXE + "' file. Please check permissions.", null);
-                }
-                else
-                    println("Ok. But Hadoop client probably will not work on Windows this way...");
-            }
-
-            processCmdFiles(hadoopDir, "bin", "sbin", "libexec");
-        }
-
-        File igniteLibs = new File(new File(igniteHome), "libs");
-
-        if (!igniteLibs.exists())
-            exit("Ignite 'libs' folder is not found.", null);
-
-        Collection<File> jarFiles = new ArrayList<>();
-
-        addJarsInFolder(jarFiles, igniteLibs);
-        addJarsInFolder(jarFiles, new File(igniteLibs, "ignite-hadoop"));
-
-        boolean jarsLinksCorrect = true;
-
-        for (File file : jarFiles) {
-            File link = new File(hadoopCommonLibDir, file.getName());
-
-            jarsLinksCorrect &= isJarLinkCorrect(link, file);
-
-            if (!jarsLinksCorrect)
-                break;
-        }
-
-        if (!jarsLinksCorrect) {
-            if (ask("Ignite JAR files are not found in Hadoop 'lib' directory. " +
-                "Create appropriate symbolic links?")) {
-                File[] oldIgniteJarFiles = hadoopCommonLibDir.listFiles(IGNITE_JARS);
-
-                if (oldIgniteJarFiles.length > 0 && ask("The Hadoop 'lib' directory contains JARs from other Ignite " +
-                    "installation. They must be deleted to continue. Continue?")) {
-                    for (File file : oldIgniteJarFiles) {
-                        println("Deleting file '" + file.getAbsolutePath() + "'.");
-
-                        if (!file.delete())
-                            exit("Failed to delete file '" + file.getPath() + "'.", null);
-                    }
-                }
-
-                for (File file : jarFiles) {
-                    File targetFile = new File(hadoopCommonLibDir, file.getName());
-
-                    try {
-                        println("Creating symbolic link '" + targetFile.getAbsolutePath() + "'.");
-
-                        Files.createSymbolicLink(targetFile.toPath(), file.toPath());
-                    }
-                    catch (IOException e) {
-                        if (U.isWindows()) {
-                            warn("Ability to create symbolic links is required!");
-                            warn("On Windows platform you have to grant permission 'Create symbolic links'");
-                            warn("to your user or run the Accelerator as Administrator.");
-                        }
-
-                        exit("Creating symbolic link failed! Check permissions.", e);
-                    }
-                }
-            }
-            else
-                println("Ok. But Hadoop client will not be able to talk to Ignite cluster without those JARs in classpath...");
-        }
-
-        File hadoopEtc = new File(hadoopDir, "etc" + File.separator + "hadoop");
-
-        File igniteHadoopCfg = igniteHadoopConfig(igniteHome);
-
-        if (!igniteHadoopCfg.canRead())
-            exit("Failed to read Ignite Hadoop 'config' folder at '" + igniteHadoopCfg.getAbsolutePath() + "'.", null);
-
-        if (hadoopEtc.canWrite()) { // TODO Bigtop
-            if (ask("Replace 'core-site.xml' and 'mapred-site.xml' files with preconfigured templates " +
-                "(existing files will be backed up)?")) {
-                replaceWithBackup(new File(igniteHadoopCfg, "core-site.ignite.xml"),
-                    new File(hadoopEtc, "core-site.xml"));
-
-                replaceWithBackup(new File(igniteHadoopCfg, "mapred-site.ignite.xml"),
-                    new File(hadoopEtc, "mapred-site.xml"));
-            }
-            else
-                println("Ok. You can configure them later, the templates are available at Ignite's 'docs' directory...");
-        }
-
-        if (!F.isEmpty(hiveHome)) {
-            File hiveConfDir = new File(hiveHome + File.separator + "conf");
-
-            if (!hiveConfDir.canWrite())
-                warn("Can not write to '" + hiveConfDir.getAbsolutePath() + "'. To run Hive queries you have to " +
-                    "configure 'hive-site.xml' manually. The template is available at Ignite's 'docs' directory.");
-            else if (ask("Replace 'hive-site.xml' with preconfigured template (existing file will be backed up)?"))
-                replaceWithBackup(new File(igniteHadoopCfg, "hive-site.ignite.xml"),
-                    new File(hiveConfDir, "hive-site.xml"));
-            else
-                println("Ok. You can configure it later, the template is available at Ignite's 'docs' directory...");
-        }
-
-        println("Apache Hadoop setup is complete.");
-    }
-
-    /**
-     * Get Ignite Hadoop config directory.
-     *
-     * @param igniteHome Ignite home.
-     * @return Ignite Hadoop config directory.
-     */
-    private static File igniteHadoopConfig(String igniteHome) {
-        Path path = Paths.get(igniteHome, "modules", "hadoop", "config");
-
-        if (!Files.exists(path))
-            path = Paths.get(igniteHome, "config", "hadoop");
-
-        if (Files.exists(path))
-            return path.toFile();
-        else
-            return new File(igniteHome, "docs");
-    }
-
-    /**
-     * @param jarFiles Jars.
-     * @param folder Folder.
-     */
-    private static void addJarsInFolder(Collection<File> jarFiles, File folder) {
-        if (!folder.exists())
-            exit("Folder '" + folder.getAbsolutePath() + "' is not found.", null);
-
-        jarFiles.addAll(Arrays.asList(folder.listFiles(IGNITE_JARS)));
-    }
-
-    /**
-     * Checks that JAVA_HOME does not contain space characters.
-     */
-    private static void checkJavaPathSpaces() {
-        String javaHome = System.getProperty("java.home");
-
-        if (javaHome.contains(" ")) {
-            warn("Java installation path contains space characters!");
-            warn("Hadoop client will not be able to start using '" + javaHome + "'.");
-            warn("Please install JRE to path which does not contain spaces and point JAVA_HOME to that installation.");
-        }
-    }
-
-    /**
-     * Checks Ignite home.
-     *
-     * @param igniteHome Ignite home.
-     */
-    private static void checkIgniteHome(String igniteHome) {
-        URL jarUrl = U.class.getProtectionDomain().getCodeSource().getLocation();
-
-        try {
-            Path jar = Paths.get(jarUrl.toURI());
-            Path igHome = Paths.get(igniteHome);
-
-            if (!jar.startsWith(igHome))
-                exit("Ignite JAR files are not under IGNITE_HOME.", null);
-        }
-        catch (Exception e) {
-            exit(e.getMessage(), e);
-        }
-    }
-
-    /**
-     * Replaces target file with source file.
-     *
-     * @param from From.
-     * @param to To.
-     */
-    private static void replaceWithBackup(File from, File to) {
-        if (!from.canRead())
-            exit("Failed to read source file '" + from.getAbsolutePath() + "'.", null);
-
-        println("Replacing file '" + to.getAbsolutePath() + "'.");
-
-        try {
-            U.copy(from, renameToBak(to), true);
-        }
-        catch (IOException e) {
-            exit("Failed to replace file '" + to.getAbsolutePath() + "'.", e);
-        }
-    }
-
-    /**
-     * Renames file for backup.
-     *
-     * @param file File.
-     * @return File.
-     */
-    private static File renameToBak(File file) {
-        DateFormat fmt = new SimpleDateFormat("yyyy-MM-dd_HH-mm-ss");
-
-        if (file.exists() && !file.renameTo(new File(file.getAbsolutePath() + "." + fmt.format(new Date()) + ".bak")))
-            exit("Failed to rename file '" + file.getPath() + "'.", null);
-
-        return file;
-    }
-
-    /**
-     * Checks if link is correct.
-     *
-     * @param link Symbolic link.
-     * @param correctTarget Correct link target.
-     * @return {@code true} If link target is correct.
-     */
-    private static boolean isJarLinkCorrect(File link, File correctTarget) {
-        if (!Files.isSymbolicLink(link.toPath()))
-            return false; // It is a real file or it does not exist.
-
-        Path target = null;
-
-        try {
-            target = Files.readSymbolicLink(link.toPath());
-        }
-        catch (IOException e) {
-            exit("Failed to read symbolic link: " + link.getAbsolutePath(), e);
-        }
-
-        return Files.exists(target) && target.toFile().equals(correctTarget);
-    }
-
-    /**
-     * Writes the question and reads the boolean answer from the console.
-     *
-     * @param question Question to write.
-     * @return {@code true} if user inputs 'Y' or 'y', {@code false} otherwise.
-     */
-    private static boolean ask(String question) {
-        X.println();
-        X.print(" <  " + question + " (Y/N): ");
-
-        String answer = null;
-
-        if (!F.isEmpty(System.getenv("IGNITE_HADOOP_SETUP_YES")))
-            answer = "Y";
-        else {
-            BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
-
-            try {
-                answer = br.readLine();
-            }
-            catch (IOException e) {
-                exit("Failed to read answer: " + e.getMessage(), e);
-            }
-        }
-
-        if (answer != null && "Y".equals(answer.toUpperCase().trim())) {
-            X.println(" >  Yes.");
-
-            return true;
-        }
-        else {
-            X.println(" >  No.");
-
-            return false;
-        }
-    }
-
-    /**
-     * Exit with message.
-     *
-     * @param msg Exit message.
-     * @param e Optional exception to print when debug output is enabled.
-     */
-    private static void exit(String msg, Exception e) {
-        X.println("    ");
-        X.println("  # " + msg);
-        X.println("  # Setup failed, exiting... ");
-
-        if (e != null && !F.isEmpty(System.getenv("IGNITE_HADOOP_SETUP_DEBUG")))
-            e.printStackTrace();
-
-        System.exit(1);
-    }
-
-    /**
-     * Prints message.
-     *
-     * @param msg Message.
-     */
-    private static void println(String msg) {
-        X.println("  > " + msg);
-    }
-
-    /**
-     * Prints warning.
-     *
-     * @param msg Message.
-     */
-    private static void warn(String msg) {
-        X.println("  ! " + msg);
-    }
-
-    /**
-     * Checks that CMD files have valid MS Windows new line characters. If not, writes a question to the console and
-     * reads the answer. If it is 'Y', backs up the original files and corrects the invalid new line characters.
-     *
-     * @param rootDir Root directory to process.
-     * @param dirs Directories inside of the root to process.
-     */
-    private static void processCmdFiles(File rootDir, String... dirs) {
-        boolean answer = false;
-
-        for (String dir : dirs) {
-            File subDir = new File(rootDir, dir);
-
-            File[] cmdFiles = subDir.listFiles(new FilenameFilter() {
-                @Override public boolean accept(File dir, String name) {
-                    return name.toLowerCase().endsWith(".cmd");
-                }
-            });
-
-            if (cmdFiles == null)
-                continue; // Directory may not exist in this distribution.
-
-            for (File file : cmdFiles) {
-                String content = null;
-
-                try (Scanner scanner = new Scanner(file)) {
-                    content = scanner.useDelimiter("\\Z").next();
-                }
-                catch (FileNotFoundException e) {
-                    exit("Failed to read file '" + file + "'.", e);
-                }
-
-                boolean invalid = false;
-
-                for (int i = 0; i < content.length(); i++) {
-                    if (content.charAt(i) == '\n' && (i == 0 || content.charAt(i - 1) != '\r')) {
-                        invalid = true;
-
-                        break;
-                    }
-                }
-
-                if (invalid) {
-                    answer = answer || ask("One or more *.CMD files has invalid new line character. Replace them?");
-
-                    if (!answer) {
-                        println("Ok. But Windows most probably will fail to execute them...");
-
-                        return;
-                    }
-
-                    println("Fixing newline characters in file '" + file.getAbsolutePath() + "'.");
-
-                    renameToBak(file);
-
-                    try (BufferedWriter writer = new BufferedWriter(new FileWriter(file))) {
-                        for (int i = 0; i < content.length(); i++) {
-                            if (content.charAt(i) == '\n' && (i == 0 || content.charAt(i - 1) != '\r'))
-                                writer.write("\r");
-
-                            writer.write(content.charAt(i));
-                        }
-                    }
-                    catch (IOException e) {
-                        exit("Failed to write file '" + file.getPath() + "': " + e.getMessage(), e);
-                    }
-                }
-            }
-        }
-    }
-}
\ No newline at end of file

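The CMD-file repair above hinges on a single predicate: a '\n' that is not preceded by '\r' marks the file as invalid for Windows. A self-contained sketch of that check, with illustrative class name and inputs:

    public class CmdNewLineCheckSketch {
        /** Returns true if content contains a bare '\n' not preceded by '\r'. */
        static boolean hasBareLineFeed(String content) {
            for (int i = 0; i < content.length(); i++) {
                if (content.charAt(i) == '\n' && (i == 0 || content.charAt(i - 1) != '\r'))
                    return true;
            }

            return false;
        }

        public static void main(String[] args) {
            System.out.println(hasBareLineFeed("echo off\nrem unix endings"));      // true
            System.out.println(hasBareLineFeed("echo off\r\nrem windows endings")); // false
        }
    }
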
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskCancelledException.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskCancelledException.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskCancelledException.java
deleted file mode 100644
index 1dc8674..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskCancelledException.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import org.apache.ignite.IgniteException;
-
-/**
- * Exception thrown when a task is cancelled.
- */
-public class HadoopTaskCancelledException extends IgniteException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * @param msg Exception message.
-     */
-    public HadoopTaskCancelledException(String msg) {
-        super(msg);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopUtils.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopUtils.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopUtils.java
deleted file mode 100644
index 65d9810..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopUtils.java
+++ /dev/null
@@ -1,443 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutput;
-import java.io.ObjectOutputStream;
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeSet;
-import java.util.UUID;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.mapreduce.JobPriority;
-import org.apache.hadoop.mapreduce.JobStatus;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.v2.HadoopSplitWrapper;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Hadoop utility methods.
- */
-public class HadoopUtils {
-    /** Property to store timestamp of new job id request. */
-    public static final String REQ_NEW_JOBID_TS_PROPERTY = "ignite.job.requestNewIdTs";
-
-    /** Property to store timestamp of response of new job id request. */
-    public static final String RESPONSE_NEW_JOBID_TS_PROPERTY = "ignite.job.responseNewIdTs";
-
-    /** Property to store timestamp of job submission. */
-    public static final String JOB_SUBMISSION_START_TS_PROPERTY = "ignite.job.submissionStartTs";
-
-    /** Property to set custom writer of job statistics. */
-    public static final String JOB_COUNTER_WRITER_PROPERTY = "ignite.counters.writer";
-
-    /** Staging constant. */
-    private static final String STAGING_CONSTANT = ".staging";
-
-    /** Old mapper class attribute. */
-    private static final String OLD_MAP_CLASS_ATTR = "mapred.mapper.class";
-
-    /** Old reducer class attribute. */
-    private static final String OLD_REDUCE_CLASS_ATTR = "mapred.reducer.class";
-
-    /**
-     * Constructor.
-     */
-    private HadoopUtils() {
-        // No-op.
-    }
-
-    /**
-     * Wraps native split.
-     *
-     * @param id Split ID.
-     * @param split Split.
-     * @param hosts Hosts.
-     * @return Wrapped split.
-     * @throws IOException If failed.
-     */
-    public static HadoopSplitWrapper wrapSplit(int id, Object split, String[] hosts) throws IOException {
-        ByteArrayOutputStream arr = new ByteArrayOutputStream();
-        ObjectOutput out = new ObjectOutputStream(arr);
-
-        assert split instanceof Writable;
-
-        ((Writable)split).write(out);
-
-        out.flush();
-
-        return new HadoopSplitWrapper(id, split.getClass().getName(), arr.toByteArray(), hosts);
-    }
-
-    /**
-     * Unwraps native split.
-     *
-     * @param o Wrapper.
-     * @return Split.
-     */
-    public static Object unwrapSplit(HadoopSplitWrapper o) {
-        try {
-            Writable w = (Writable)HadoopUtils.class.getClassLoader().loadClass(o.className()).newInstance();
-
-            w.readFields(new ObjectInputStream(new ByteArrayInputStream(o.bytes())));
-
-            return w;
-        }
-        catch (Exception e) {
-            throw new IllegalStateException(e);
-        }
-    }
-
-    /**
-     * Convert Ignite job status to Hadoop job status.
-     *
-     * @param status Ignite job status.
-     * @param conf Hadoop configuration used to resolve the job file path.
-     * @return Hadoop job status.
-     */
-    public static JobStatus status(HadoopJobStatus status, Configuration conf) {
-        JobID jobId = new JobID(status.jobId().globalId().toString(), status.jobId().localId());
-
-        float setupProgress = 0;
-        float mapProgress = 0;
-        float reduceProgress = 0;
-        float cleanupProgress = 0;
-
-        JobStatus.State state = JobStatus.State.RUNNING;
-
-        switch (status.jobPhase()) {
-            case PHASE_SETUP:
-                setupProgress = 0.42f;
-
-                break;
-
-            case PHASE_MAP:
-                setupProgress = 1;
-                mapProgress = 1f - status.pendingMapperCnt() / (float)status.totalMapperCnt();
-
-                break;
-
-            case PHASE_REDUCE:
-                setupProgress = 1;
-                mapProgress = 1;
-
-                if (status.totalReducerCnt() > 0)
-                    reduceProgress = 1f - status.pendingReducerCnt() / (float)status.totalReducerCnt();
-                else
-                    reduceProgress = 1f;
-
-                break;
-
-            case PHASE_CANCELLING:
-            case PHASE_COMPLETE:
-                if (!status.isFailed()) {
-                    setupProgress = 1;
-                    mapProgress = 1;
-                    reduceProgress = 1;
-                    cleanupProgress = 1;
-
-                    state = JobStatus.State.SUCCEEDED;
-                }
-                else
-                    state = JobStatus.State.FAILED;
-
-                break;
-
-            default:
-                assert false;
-        }
-
-        return new JobStatus(jobId, setupProgress, mapProgress, reduceProgress, cleanupProgress, state,
-            JobPriority.NORMAL, status.user(), status.jobName(), jobFile(conf, status.user(), jobId).toString(), "N/A");
-    }
-
-    /**
-     * Gets staging area directory.
-     *
-     * @param conf Configuration.
-     * @param usr User.
-     * @return Staging area directory.
-     */
-    public static Path stagingAreaDir(Configuration conf, String usr) {
-        return new Path(conf.get(MRJobConfig.MR_AM_STAGING_DIR, MRJobConfig.DEFAULT_MR_AM_STAGING_DIR)
-            + Path.SEPARATOR + usr + Path.SEPARATOR + STAGING_CONSTANT);
-    }
-
-    /**
-     * Gets job file.
-     *
-     * @param conf Configuration.
-     * @param usr User.
-     * @param jobId Job ID.
-     * @return Job file.
-     */
-    public static Path jobFile(Configuration conf, String usr, JobID jobId) {
-        return new Path(stagingAreaDir(conf, usr), jobId.toString() + Path.SEPARATOR + MRJobConfig.JOB_CONF_FILE);
-    }
-
-    /**
-     * Checks that the attribute is not set in the configuration.
-     *
-     * @param cfg Configuration to check.
-     * @param attr Attribute name.
-     * @param msg Message for creation of exception.
-     * @throws IgniteCheckedException If attribute is set.
-     */
-    public static void ensureNotSet(Configuration cfg, String attr, String msg) throws IgniteCheckedException {
-        if (cfg.get(attr) != null)
-            throw new IgniteCheckedException(attr + " is incompatible with " + msg + " mode.");
-    }
-
-    /**
-     * Creates JobInfo from hadoop configuration.
-     *
-     * @param cfg Hadoop configuration.
-     * @return Job info.
-     * @throws IgniteCheckedException If failed.
-     */
-    public static HadoopDefaultJobInfo createJobInfo(Configuration cfg) throws IgniteCheckedException {
-        JobConf jobConf = new JobConf(cfg);
-
-        boolean hasCombiner = jobConf.get("mapred.combiner.class") != null
-                || jobConf.get(MRJobConfig.COMBINE_CLASS_ATTR) != null;
-
-        int numReduces = jobConf.getNumReduceTasks();
-
-        jobConf.setBooleanIfUnset("mapred.mapper.new-api", jobConf.get(OLD_MAP_CLASS_ATTR) == null);
-
-        if (jobConf.getUseNewMapper()) {
-            String mode = "new map API";
-
-            ensureNotSet(jobConf, "mapred.input.format.class", mode);
-            ensureNotSet(jobConf, OLD_MAP_CLASS_ATTR, mode);
-
-            if (numReduces != 0)
-                ensureNotSet(jobConf, "mapred.partitioner.class", mode);
-            else
-                ensureNotSet(jobConf, "mapred.output.format.class", mode);
-        }
-        else {
-            String mode = "map compatibility";
-
-            ensureNotSet(jobConf, MRJobConfig.INPUT_FORMAT_CLASS_ATTR, mode);
-            ensureNotSet(jobConf, MRJobConfig.MAP_CLASS_ATTR, mode);
-
-            if (numReduces != 0)
-                ensureNotSet(jobConf, MRJobConfig.PARTITIONER_CLASS_ATTR, mode);
-            else
-                ensureNotSet(jobConf, MRJobConfig.OUTPUT_FORMAT_CLASS_ATTR, mode);
-        }
-
-        if (numReduces != 0) {
-            jobConf.setBooleanIfUnset("mapred.reducer.new-api", jobConf.get(OLD_REDUCE_CLASS_ATTR) == null);
-
-            if (jobConf.getUseNewReducer()) {
-                String mode = "new reduce API";
-
-                ensureNotSet(jobConf, "mapred.output.format.class", mode);
-                ensureNotSet(jobConf, OLD_REDUCE_CLASS_ATTR, mode);
-            }
-            else {
-                String mode = "reduce compatibility";
-
-                ensureNotSet(jobConf, MRJobConfig.OUTPUT_FORMAT_CLASS_ATTR, mode);
-                ensureNotSet(jobConf, MRJobConfig.REDUCE_CLASS_ATTR, mode);
-            }
-        }
-
-        Map<String, String> props = new HashMap<>();
-
-        for (Map.Entry<String, String> entry : jobConf)
-            props.put(entry.getKey(), entry.getValue());
-
-        return new HadoopDefaultJobInfo(jobConf.getJobName(), jobConf.getUser(), hasCombiner, numReduces, props);
-    }
-
-    /**
-     * Creates a new {@link IgniteCheckedException} with the original exception serialized into a string.
-     * This is needed to transfer the error outside the current class loader.
-     *
-     * @param e Original exception.
-     * @return New exception.
-     */
-    public static IgniteCheckedException transformException(Throwable e) {
-        ByteArrayOutputStream os = new ByteArrayOutputStream();
-
-        e.printStackTrace(new PrintStream(os, true));
-
-        return new IgniteCheckedException(os.toString());
-    }
-
-    /**
-     * Returns work directory for job execution.
-     *
-     * @param locNodeId Local node ID.
-     * @param jobId Job ID.
-     * @return Working directory for job.
-     * @throws IgniteCheckedException If failed.
-     */
-    public static File jobLocalDir(UUID locNodeId, HadoopJobId jobId) throws IgniteCheckedException {
-        return new File(new File(U.resolveWorkDirectory("hadoop", false), "node-" + locNodeId), "job_" + jobId);
-    }
-
-    /**
-     * Returns subdirectory of job working directory for task execution.
-     *
-     * @param locNodeId Local node ID.
-     * @param info Task info.
-     * @return Working directory for task.
-     * @throws IgniteCheckedException If failed.
-     */
-    public static File taskLocalDir(UUID locNodeId, HadoopTaskInfo info) throws IgniteCheckedException {
-        File jobLocDir = jobLocalDir(locNodeId, info.jobId());
-
-        return new File(jobLocDir, info.type() + "_" + info.taskNumber() + "_" + info.attempt());
-    }
-
-    /**
-     * Creates {@link Configuration} in the correct class loader context to avoid caching
-     * an inappropriate class loader in the Configuration object.
-     * @return New instance of {@link Configuration}.
-     */
-    public static Configuration safeCreateConfiguration() {
-        final ClassLoader oldLdr = setContextClassLoader(Configuration.class.getClassLoader());
-
-        try {
-            return new Configuration();
-        }
-        finally {
-            restoreContextClassLoader(oldLdr);
-        }
-    }
-
-    /**
-     * Sort input splits by length in descending order.
-     *
-     * @param splits Splits.
-     * @return Sorted splits.
-     */
-    public static List<HadoopInputSplit> sortInputSplits(Collection<HadoopInputSplit> splits) {
-        int id = 0;
-
-        TreeSet<SplitSortWrapper> sortedSplits = new TreeSet<>();
-
-        for (HadoopInputSplit split : splits) {
-            long len = split instanceof HadoopFileBlock ? ((HadoopFileBlock)split).length() : 0;
-
-            sortedSplits.add(new SplitSortWrapper(id++, split, len));
-        }
-
-        ArrayList<HadoopInputSplit> res = new ArrayList<>(sortedSplits.size());
-
-        for (SplitSortWrapper sortedSplit : sortedSplits)
-            res.add(sortedSplit.split);
-
-        return res;
-    }
-
-    /**
-     * Set context class loader.
-     *
-     * @param newLdr New class loader.
-     * @return Old class loader.
-     */
-    @Nullable public static ClassLoader setContextClassLoader(@Nullable ClassLoader newLdr) {
-        ClassLoader oldLdr = Thread.currentThread().getContextClassLoader();
-
-        if (newLdr != oldLdr)
-            Thread.currentThread().setContextClassLoader(newLdr);
-
-        return oldLdr;
-    }
-
-    /**
-     * Restore context class loader.
-     *
-     * @param oldLdr Original class loader.
-     */
-    public static void restoreContextClassLoader(@Nullable ClassLoader oldLdr) {
-        ClassLoader newLdr = Thread.currentThread().getContextClassLoader();
-
-        if (newLdr != oldLdr)
-            Thread.currentThread().setContextClassLoader(oldLdr);
-    }
-
-    /**
-     * Split wrapper for sorting.
-     */
-    private static class SplitSortWrapper implements Comparable<SplitSortWrapper> {
-        /** Unique ID. */
-        private final int id;
-
-        /** Split. */
-        private final HadoopInputSplit split;
-
-        /** Split length. */
-        private final long len;
-
-        /**
-         * Constructor.
-         *
-         * @param id Unique ID.
-         * @param split Split.
-         * @param len Split length.
-         */
-        public SplitSortWrapper(int id, HadoopInputSplit split, long len) {
-            this.id = id;
-            this.split = split;
-            this.len = len;
-        }
-
-        /** {@inheritDoc} */
-        @SuppressWarnings("NullableProblems")
-        @Override public int compareTo(SplitSortWrapper other) {
-            assert other != null;
-
-            long res = len - other.len;
-
-            if (res > 0)
-                return -1;
-            else if (res < 0)
-                return 1;
-            else
-                return id - other.id;
-        }
-
-        /** {@inheritDoc} */
-        @Override public int hashCode() {
-            return id;
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean equals(Object obj) {
-            return obj instanceof SplitSortWrapper && id == ((SplitSortWrapper)obj).id;
-        }
-    }
-}
\ No newline at end of file

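To make the ordering of sortInputSplits() above concrete: SplitSortWrapper sorts by descending length and breaks ties with the insertion id, so equal-length splits are not collapsed by the TreeSet. A standalone sketch with illustrative names:

    import java.util.TreeSet;

    public class SplitOrderSketch {
        private static class Split implements Comparable<Split> {
            private final int id;
            private final long len;

            Split(int id, long len) {
                this.id = id;
                this.len = len;
            }

            @Override public int compareTo(Split other) {
                // Descending by length, ascending by id on ties (as in SplitSortWrapper).
                long res = len - other.len;

                if (res > 0)
                    return -1;
                else if (res < 0)
                    return 1;
                else
                    return id - other.id;
            }

            @Override public String toString() {
                return "split-" + id + "(len=" + len + ")";
            }
        }

        public static void main(String[] args) {
            TreeSet<Split> sorted = new TreeSet<>();

            sorted.add(new Split(0, 64));
            sorted.add(new Split(1, 128));
            sorted.add(new Split(2, 128)); // Same length as split 1: kept thanks to the id tie-breaker.

            System.out.println(sorted); // [split-1(len=128), split-2(len=128), split-0(len=64)]
        }
    }
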
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCounterAdapter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCounterAdapter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCounterAdapter.java
deleted file mode 100644
index 3f682d3..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCounterAdapter.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.counter;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Default Hadoop counter implementation.
- */
-public abstract class HadoopCounterAdapter implements HadoopCounter, Externalizable {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** Counter group name. */
-    private String grp;
-
-    /** Counter name. */
-    private String name;
-
-    /**
-     * Default constructor required by {@link Externalizable}.
-     */
-    protected HadoopCounterAdapter() {
-        // No-op.
-    }
-
-    /**
-     * Creates new counter with given group and name.
-     *
-     * @param grp Counter group name.
-     * @param name Counter name.
-     */
-    protected HadoopCounterAdapter(String grp, String name) {
-        assert grp != null : "counter must have group";
-        assert name != null : "counter must have name";
-
-        this.grp = grp;
-        this.name = name;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String name() {
-        return name;
-    }
-
-    /** {@inheritDoc} */
-    @Override @Nullable public String group() {
-        return grp;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        out.writeUTF(grp);
-        out.writeUTF(name);
-        writeValue(out);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        grp = in.readUTF();
-        name = in.readUTF();
-        readValue(in);
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean equals(Object o) {
-        if (this == o)
-            return true;
-        if (o == null || getClass() != o.getClass())
-            return false;
-
-        HadoopCounterAdapter cntr = (HadoopCounterAdapter)o;
-
-        if (!grp.equals(cntr.grp))
-            return false;
-        if (!name.equals(cntr.name))
-            return false;
-
-        return true;
-    }
-
-    /** {@inheritDoc} */
-    @Override public int hashCode() {
-        int res = grp.hashCode();
-        res = 31 * res + name.hashCode();
-        return res;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopCounterAdapter.class, this);
-    }
-
-    /**
-     * Writes value of this counter to output.
-     *
-     * @param out Output.
-     * @throws IOException If failed.
-     */
-    protected abstract void writeValue(ObjectOutput out) throws IOException;
-
-    /**
-     * Read value of this counter from input.
-     *
-     * @param in Input.
-     * @throws IOException If failed.
-     */
-    protected abstract void readValue(ObjectInput in) throws IOException;
-}
\ No newline at end of file

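To show what a concrete subclass of this adapter looks like, here is a sketch modeled on HadoopLongCounter (deleted later in this commit): subclasses serialize only their value, while the adapter handles the group and name. The merge(HadoopCounter) signature is inferred from its use in HadoopCountersImpl below, and the class is assumed to live in the same counter package:

    import java.io.IOException;
    import java.io.ObjectInput;
    import java.io.ObjectOutput;

    public class LongCounterSketch extends HadoopCounterAdapter {
        /** */
        private static final long serialVersionUID = 0L;

        /** Counter value. */
        private long val;

        /** Default constructor required by Externalizable. */
        public LongCounterSketch() {
            // No-op.
        }

        /**
         * @param grp Group name.
         * @param name Counter name.
         */
        public LongCounterSketch(String grp, String name) {
            super(grp, name);
        }

        /** @param i Increment. */
        public void increment(long i) {
            val += i;
        }

        /** {@inheritDoc} */
        @Override public void merge(HadoopCounter cntr) {
            val += ((LongCounterSketch)cntr).val;
        }

        /** {@inheritDoc} */
        @Override protected void writeValue(ObjectOutput out) throws IOException {
            out.writeLong(val);
        }

        /** {@inheritDoc} */
        @Override protected void readValue(ObjectInput in) throws IOException {
            val = in.readLong();
        }
    }
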
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCountersImpl.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCountersImpl.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCountersImpl.java
deleted file mode 100644
index f3b5463..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCountersImpl.java
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.counter;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.lang.reflect.Constructor;
-import java.util.Collection;
-import java.util.concurrent.ConcurrentMap;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.internal.util.lang.GridTuple3;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.jsr166.ConcurrentHashMap8;
-
-/**
- * Default in-memory counters store.
- */
-public class HadoopCountersImpl implements HadoopCounters, Externalizable {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** */
-    private final ConcurrentMap<CounterKey, HadoopCounter> cntrsMap = new ConcurrentHashMap8<>();
-
-    /**
-     * Default constructor. Creates new instance without counters.
-     */
-    public HadoopCountersImpl() {
-        // No-op.
-    }
-
-    /**
-     * Creates a new instance that contains the given counters.
-     *
-     * @param cntrs Counters to store.
-     */
-    public HadoopCountersImpl(Iterable<HadoopCounter> cntrs) {
-        addCounters(cntrs, true);
-    }
-
-    /**
-     * Copy constructor.
-     *
-     * @param cntrs Counters to copy.
-     */
-    public HadoopCountersImpl(HadoopCounters cntrs) {
-        this(cntrs.all());
-    }
-
-    /**
-     * Creates counter instance.
-     *
-     * @param cls Class of the counter.
-     * @param grp Group name.
-     * @param name Counter name.
-     * @return Counter.
-     */
-    private <T extends HadoopCounter> T createCounter(Class<? extends HadoopCounter> cls, String grp,
-        String name) {
-        try {
-            Constructor constructor = cls.getConstructor(String.class, String.class);
-
-            return (T)constructor.newInstance(grp, name);
-        }
-        catch (Exception e) {
-            throw new IgniteException(e);
-        }
-    }
-
-    /**
-     * Adds counters collection in addition to existing counters.
-     *
-     * @param cntrs Counters to add.
-     * @param cp Whether to copy counters or not.
-     */
-    private void addCounters(Iterable<HadoopCounter> cntrs, boolean cp) {
-        assert cntrs != null;
-
-        for (HadoopCounter cntr : cntrs) {
-            if (cp) {
-                HadoopCounter cntrCp = createCounter(cntr.getClass(), cntr.group(), cntr.name());
-
-                cntrCp.merge(cntr);
-
-                cntr = cntrCp;
-            }
-
-            cntrsMap.put(new CounterKey(cntr.getClass(), cntr.group(), cntr.name()), cntr);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public <T extends HadoopCounter> T counter(String grp, String name, Class<T> cls) {
-        assert cls != null;
-
-        CounterKey mapKey = new CounterKey(cls, grp, name);
-
-        T cntr = (T)cntrsMap.get(mapKey);
-
-        if (cntr == null) {
-            cntr = createCounter(cls, grp, name);
-
-            T old = (T)cntrsMap.putIfAbsent(mapKey, cntr);
-
-            if (old != null)
-                return old;
-        }
-
-        return cntr;
-    }
-
-    /** {@inheritDoc} */
-    @Override public Collection<HadoopCounter> all() {
-        return cntrsMap.values();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void merge(HadoopCounters other) {
-        for (HadoopCounter counter : other.all())
-            counter(counter.group(), counter.name(), counter.getClass()).merge(counter);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        U.writeCollection(out, cntrsMap.values());
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        addCounters(U.<HadoopCounter>readCollection(in), false);
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean equals(Object o) {
-        if (this == o)
-            return true;
-
-        if (o == null || getClass() != o.getClass())
-            return false;
-
-        HadoopCountersImpl counters = (HadoopCountersImpl)o;
-
-        return cntrsMap.equals(counters.cntrsMap);
-    }
-
-    /** {@inheritDoc} */
-    @Override public int hashCode() {
-        return cntrsMap.hashCode();
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopCountersImpl.class, this, "counters", cntrsMap.values());
-    }
-
-    /**
-     * Tuple of counter identifier components, used to make the code more readable.
-     */
-    private static class CounterKey extends GridTuple3<Class<? extends HadoopCounter>, String, String> {
-        /** */
-        private static final long serialVersionUID = 0L;
-
-        /**
-         * Constructor.
-         *
-         * @param cls Class of the counter.
-         * @param grp Group name.
-         * @param name Counter name.
-         */
-        private CounterKey(Class<? extends HadoopCounter> cls, String grp, String name) {
-            super(cls, grp, name);
-        }
-
-        /**
-         * Empty constructor required by {@link Externalizable}.
-         */
-        public CounterKey() {
-            // No-op.
-        }
-    }
-}
\ No newline at end of file
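
For readers tracing the counters code above: counter(grp, name, cls) is a classic get-or-create lookup over a ConcurrentMap, with putIfAbsent resolving the race when two threads register the same counter at once. Below is a minimal standalone sketch of that idiom; the class and string key are simplified stand-ins, not the Ignite API.

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    /** Minimal get-or-create cache mirroring the putIfAbsent idiom above. */
    public class GetOrCreateExample {
        private final ConcurrentMap<String, long[]> map = new ConcurrentHashMap<>();

        /** Returns the counter for the key, creating it on first access. */
        long[] counter(String key) {
            long[] cntr = map.get(key);

            if (cntr == null) {
                long[] old = map.putIfAbsent(key, cntr = new long[1]);

                if (old != null)
                    return old; // Another thread won the race; use its instance.
            }

            return cntr;
        }

        public static void main(String[] args) {
            GetOrCreateExample ex = new GetOrCreateExample();

            ex.counter("SYSTEM.PERFORMANCE")[0] += 5;

            // The second lookup returns the same cached instance.
            System.out.println(ex.counter("SYSTEM.PERFORMANCE")[0]); // Prints 5.
        }
    }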

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopLongCounter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopLongCounter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopLongCounter.java
deleted file mode 100644
index 0d61e0d..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopLongCounter.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.counter;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-
-/**
- * Standard hadoop counter to use via original Hadoop API in Hadoop jobs.
- */
-public class HadoopLongCounter extends HadoopCounterAdapter {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** The counter value. */
-    private long val;
-
-    /**
-     * Default constructor required by {@link Externalizable}.
-     */
-    public HadoopLongCounter() {
-        // No-op.
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param grp Group name.
-     * @param name Counter name.
-     */
-    public HadoopLongCounter(String grp, String name) {
-        super(grp, name);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void writeValue(ObjectOutput out) throws IOException {
-        out.writeLong(val);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void readValue(ObjectInput in) throws IOException {
-        val = in.readLong();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void merge(HadoopCounter cntr) {
-        val += ((HadoopLongCounter)cntr).val;
-    }
-
-    /**
-     * Gets current value of this counter.
-     *
-     * @return Current value.
-     */
-    public long value() {
-        return val;
-    }
-
-    /**
-     * Sets the current value to the given value.
-     *
-     * @param val Value to set.
-     */
-    public void value(long val) {
-        this.val = val;
-    }
-
-    /**
-     * Increments this counter by the given value.
-     *
-     * @param i Value to increase this counter by.
-     */
-    public void increment(long i) {
-        val += i;
-    }
-}
\ No newline at end of file
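
The long counter above is the simplest HadoopCounter: its state is a single long, merge() is plain addition, and externalization writes just that value. A self-contained sketch of the same merge contract follows; the type names are illustrative, not the Ignite classes.

    /** Minimal long counter with add-on-merge semantics, as in HadoopLongCounter above. */
    public class MergeExample {
        static class LongCounter {
            private long val;

            void increment(long i) { val += i; }

            long value() { return val; }

            /** Merging folds another counter's value into this one by addition. */
            void merge(LongCounter other) { val += other.val; }
        }

        public static void main(String[] args) {
            LongCounter mapperSide = new LongCounter();
            LongCounter reducerSide = new LongCounter();

            mapperSide.increment(3);
            reducerSide.increment(4);

            // Aggregation across tasks reduces to summing the per-task values.
            mapperSide.merge(reducerSide);

            System.out.println(mapperSide.value()); // Prints 7.
        }
    }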

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopPerformanceCounter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopPerformanceCounter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopPerformanceCounter.java
deleted file mode 100644
index dedc6b3..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopPerformanceCounter.java
+++ /dev/null
@@ -1,288 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.counter;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.UUID;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskType;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.T2;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.jetbrains.annotations.Nullable;
-
-import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.JOB_SUBMISSION_START_TS_PROPERTY;
-import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.REQ_NEW_JOBID_TS_PROPERTY;
-import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.RESPONSE_NEW_JOBID_TS_PROPERTY;
-
-/**
- * Counter for the job statistics accumulation.
- */
-public class HadoopPerformanceCounter extends HadoopCounterAdapter {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** The group name for this counter. */
-    private static final String GROUP_NAME = "SYSTEM";
-
-    /** The counter name for this counter. */
-    private static final String COUNTER_NAME = "PERFORMANCE";
-
-    /** Events collection. */
-    private Collection<T2<String,Long>> evts = new ArrayList<>();
-
-    /** Node id to insert into the event info. */
-    private UUID nodeId;
-
-    /** */
-    private int reducerNum;
-
-    /** */
-    private volatile Long firstShuffleMsg;
-
-    /** */
-    private volatile Long lastShuffleMsg;
-
-    /**
-     * Default constructor required by {@link Externalizable}.
-     */
-    public HadoopPerformanceCounter() {
-        // No-op.
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param grp Group name.
-     * @param name Counter name.
-     */
-    public HadoopPerformanceCounter(String grp, String name) {
-        super(grp, name);
-    }
-
-    /**
-     * Constructor for creating an instance to be used as a helper.
-     *
-     * @param nodeId Id of the worker node.
-     */
-    public HadoopPerformanceCounter(UUID nodeId) {
-        this.nodeId = nodeId;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void writeValue(ObjectOutput out) throws IOException {
-        U.writeCollection(out, evts);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void readValue(ObjectInput in) throws IOException {
-        try {
-            evts = U.readCollection(in);
-        }
-        catch (ClassNotFoundException e) {
-            throw new IOException(e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void merge(HadoopCounter cntr) {
-        evts.addAll(((HadoopPerformanceCounter)cntr).evts);
-    }
-
-    /**
-     * Gets the events collection.
-     *
-     * @return Collection of events.
-     */
-    public Collection<T2<String, Long>> evts() {
-        return evts;
-    }
-
-    /**
-     * Generates a name that encodes the event information.
-     *
-     * @param info Task info.
-     * @param evtType The type of the event.
-     * @return String containing the necessary event information.
-     */
-    private String eventName(HadoopTaskInfo info, String evtType) {
-        return eventName(info.type().toString(), info.taskNumber(), evtType);
-    }
-
-    /**
-     * Generates a name that encodes the event information.
-     *
-     * @param taskType Task type.
-     * @param taskNum Number of the task.
-     * @param evtType The type of the event.
-     * @return String containing the necessary event information.
-     */
-    private String eventName(String taskType, int taskNum, String evtType) {
-        assert nodeId != null;
-
-        return taskType + " " + taskNum + " " + evtType + " " + nodeId;
-    }
-
-    /**
-     * Adds event of the task submission (task instance creation).
-     *
-     * @param info Task info.
-     * @param ts Timestamp of the event.
-     */
-    public void onTaskSubmit(HadoopTaskInfo info, long ts) {
-        evts.add(new T2<>(eventName(info, "submit"), ts));
-    }
-
-    /**
-     * Adds event of the task preparation.
-     *
-     * @param info Task info.
-     * @param ts Timestamp of the event.
-     */
-    public void onTaskPrepare(HadoopTaskInfo info, long ts) {
-        evts.add(new T2<>(eventName(info, "prepare"), ts));
-    }
-
-    /**
-     * Adds event of the task finish.
-     *
-     * @param info Task info.
-     * @param ts Timestamp of the event.
-     */
-    public void onTaskFinish(HadoopTaskInfo info, long ts) {
-        if (info.type() == HadoopTaskType.REDUCE && lastShuffleMsg != null) {
-            evts.add(new T2<>(eventName("SHUFFLE", reducerNum, "start"), firstShuffleMsg));
-            evts.add(new T2<>(eventName("SHUFFLE", reducerNum, "finish"), lastShuffleMsg));
-
-            lastShuffleMsg = null;
-        }
-
-        evts.add(new T2<>(eventName(info, "finish"), ts));
-    }
-
-    /**
-     * Adds event of the task start.
-     *
-     * @param info Task info.
-     * @param ts Timestamp of the event.
-     */
-    public void onTaskStart(HadoopTaskInfo info, long ts) {
-        evts.add(new T2<>(eventName(info, "start"), ts));
-    }
-
-    /**
-     * Adds event of the job preparation.
-     *
-     * @param ts Timestamp of the event.
-     */
-    public void onJobPrepare(long ts) {
-        assert nodeId != null;
-
-        evts.add(new T2<>("JOB prepare " + nodeId, ts));
-    }
-
-    /**
-     * Adds event of the job start.
-     *
-     * @param ts Timestamp of the event.
-     */
-    public void onJobStart(long ts) {
-        assert nodeId != null;
-
-        evts.add(new T2<>("JOB start " + nodeId, ts));
-    }
-
-    /**
-     * Adds client submission events from job info.
-     *
-     * @param info Job info.
-     */
-    public void clientSubmissionEvents(HadoopJobInfo info) {
-        assert nodeId != null;
-
-        addEventFromProperty("JOB requestId", info, REQ_NEW_JOBID_TS_PROPERTY);
-        addEventFromProperty("JOB responseId", info, RESPONSE_NEW_JOBID_TS_PROPERTY);
-        addEventFromProperty("JOB submit", info, JOB_SUBMISSION_START_TS_PROPERTY);
-    }
-
-    /**
-     * Adds an event with the timestamp taken from the given property of the job info.
-     *
-     * @param evt Event type and phase.
-     * @param info Job info.
-     * @param propName Property name to get timestamp.
-     */
-    private void addEventFromProperty(String evt, HadoopJobInfo info, String propName) {
-        String val = info.property(propName);
-
-        if (!F.isEmpty(val)) {
-            try {
-                evts.add(new T2<>(evt + " " + nodeId, Long.parseLong(val)));
-            }
-            catch (NumberFormatException e) {
-                throw new IllegalStateException("Invalid value '" + val + "' of property '" + propName + "'", e);
-            }
-        }
-    }
-
-    /**
-     * Registers shuffle message event.
-     *
-     * @param reducerNum Number of reducer that receives the data.
-     * @param ts Timestamp of the event.
-     */
-    public void onShuffleMessage(int reducerNum, long ts) {
-        this.reducerNum = reducerNum;
-
-        if (firstShuffleMsg == null)
-            firstShuffleMsg = ts;
-
-        lastShuffleMsg = ts;
-    }
-
-    /**
-     * Gets the system predefined performance counter from the HadoopCounters object.
-     *
-     * @param cntrs HadoopCounters object.
-     * @param nodeId Node id for the methods that add events. It may be null if those methods are not used.
-     * @return Predefined performance counter.
-     */
-    public static HadoopPerformanceCounter getCounter(HadoopCounters cntrs, @Nullable UUID nodeId) {
-        HadoopPerformanceCounter cntr = cntrs.counter(GROUP_NAME, COUNTER_NAME, HadoopPerformanceCounter.class);
-
-        if (nodeId != null)
-            cntr.nodeId(nodeId);
-
-        return cntr;
-    }
-
-    /**
-     * Sets the nodeId field.
-     *
-     * @param nodeId Node id.
-     */
-    private void nodeId(UUID nodeId) {
-        this.nodeId = nodeId;
-    }
-}
\ No newline at end of file
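
The performance counter above records events as (name, timestamp) pairs, packing task type, task number, event type and node id into the name, and merge() simply concatenates the event lists collected on different nodes. A rough sketch of that recording scheme, using plain JDK types in place of Ignite's T2 (all names here are illustrative):

    import java.util.AbstractMap.SimpleEntry;
    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.Map;
    import java.util.UUID;

    public class PerfEventsExample {
        /** Events collection: (name, timestamp) pairs, as in the counter above. */
        private final Collection<Map.Entry<String, Long>> evts = new ArrayList<>();

        private final UUID nodeId = UUID.randomUUID();

        /** Builds the "TASKTYPE taskNum evtType nodeId" name format used above. */
        private String eventName(String taskType, int taskNum, String evtType) {
            return taskType + " " + taskNum + " " + evtType + " " + nodeId;
        }

        /** Records a task start event with the given timestamp. */
        void onTaskStart(String taskType, int taskNum, long ts) {
            evts.add(new SimpleEntry<>(eventName(taskType, taskNum, "start"), ts));
        }

        public static void main(String[] args) {
            PerfEventsExample ex = new PerfEventsExample();

            ex.onTaskStart("MAP", 0, System.currentTimeMillis());

            for (Map.Entry<String, Long> e : ex.evts)
                System.out.println(e.getKey() + " -> " + e.getValue());
        }
    }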

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemCacheUtils.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemCacheUtils.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemCacheUtils.java
deleted file mode 100644
index 1ecbee5..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemCacheUtils.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.fs;
-
-import java.io.IOException;
-import java.net.URI;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
-import org.apache.ignite.internal.util.GridStringBuilder;
-import org.apache.ignite.internal.util.typedef.F;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * File system cache utility methods used by Map-Reduce tasks and jobs.
- */
-public class HadoopFileSystemCacheUtils {
-    /**
-     * A common static factory method that creates a new HadoopLazyConcurrentMap.
-     * @return a new HadoopLazyConcurrentMap.
-     */
-    public static HadoopLazyConcurrentMap<FsCacheKey, FileSystem> createHadoopLazyConcurrentMap() {
-        return new HadoopLazyConcurrentMap<>(
-            new HadoopLazyConcurrentMap.ValueFactory<FsCacheKey, FileSystem>() {
-                @Override public FileSystem createValue(FsCacheKey key) throws IOException {
-                    try {
-                        assert key != null;
-
-                        // Explicitly disable FileSystem caching:
-                        URI uri = key.uri();
-
-                        String scheme = uri.getScheme();
-
-                        // Copy the configuration to avoid altering the external object.
-                        Configuration cfg = new Configuration(key.configuration());
-
-                        String prop = HadoopFileSystemsUtils.disableFsCachePropertyName(scheme);
-
-                        cfg.setBoolean(prop, true);
-
-                        return FileSystem.get(uri, cfg, key.user());
-                    }
-                    catch (InterruptedException e) {
-                        Thread.currentThread().interrupt();
-
-                        throw new IOException("Failed to create file system due to interrupt.", e);
-                    }
-                }
-            }
-        );
-    }
-
-    /**
-     * Gets a non-null user name, as seen by Hadoop.
-     * @param cfg the Hadoop job configuration, must not be null.
-     * @return the user name, never null.
-     */
-    private static String getMrHadoopUser(Configuration cfg) throws IOException {
-        String user = cfg.get(MRJobConfig.USER_NAME);
-
-        if (user == null)
-            user = IgniteHadoopFileSystem.getFsHadoopUser();
-
-        return user;
-    }
-
-    /**
-     * Common method to get the V1 file system in the MapReduce engine.
-     * It gets the filesystem for the user specified in the
-     * configuration with {@link MRJobConfig#USER_NAME} property.
-     * The file systems are created and cached in the given map upon first request.
-     *
-     * @param uri The file system uri.
-     * @param cfg The configuration.
-     * @param map The caching map.
-     * @return The file system.
-     * @throws IOException On error.
-     */
-    public static FileSystem fileSystemForMrUserWithCaching(@Nullable URI uri, Configuration cfg,
-        HadoopLazyConcurrentMap<FsCacheKey, FileSystem> map)
-            throws IOException {
-        assert map != null;
-        assert cfg != null;
-
-        final String usr = getMrHadoopUser(cfg);
-
-        assert usr != null;
-
-        if (uri == null)
-            uri = FileSystem.getDefaultUri(cfg);
-
-        final FileSystem fs;
-
-        try {
-            final FsCacheKey key = new FsCacheKey(uri, usr, cfg);
-
-            fs = map.getOrCreate(key);
-        }
-        catch (IgniteException ie) {
-            throw new IOException(ie);
-        }
-
-        assert fs != null;
-        assert !(fs instanceof IgniteHadoopFileSystem) || F.eq(usr, ((IgniteHadoopFileSystem)fs).user());
-
-        return fs;
-    }
-
-    /**
-     * Fixes the file system URI using logic similar to that of FileSystem#get(URI, Configuration, String).
-     * @param uri0 The URI.
-     * @param cfg The configuration.
-     * @return Correct URI.
-     */
-    private static URI fixUri(URI uri0, Configuration cfg) {
-        if (uri0 == null)
-            return FileSystem.getDefaultUri(cfg);
-
-        String scheme = uri0.getScheme();
-        String authority = uri0.getAuthority();
-
-        if (authority == null) {
-            URI dfltUri = FileSystem.getDefaultUri(cfg);
-
-            if (scheme == null || (scheme.equals(dfltUri.getScheme()) && dfltUri.getAuthority() != null))
-                return dfltUri;
-        }
-
-        return uri0;
-    }
-
-    /**
-     * Note that configuration is not a part of the key.
-     * It is used solely to initialize the first instance
-     * that is created for the key.
-     */
-    public static final class FsCacheKey {
-        /** */
-        private final URI uri;
-
-        /** */
-        private final String usr;
-
-        /** */
-        private final String equalityKey;
-
-        /** */
-        private final Configuration cfg;
-
-        /**
-         * Constructor.
-         */
-        public FsCacheKey(URI uri, String usr, Configuration cfg) {
-            assert uri != null;
-            assert usr != null;
-            assert cfg != null;
-
-            this.uri = fixUri(uri, cfg);
-            this.usr = usr;
-            this.cfg = cfg;
-
-            this.equalityKey = createEqualityKey();
-        }
-
-        /**
-         * Creates String key used for equality and hashing.
-         */
-        private String createEqualityKey() {
-            GridStringBuilder sb = new GridStringBuilder("(").a(usr).a(")@");
-
-            if (uri.getScheme() != null)
-                sb.a(uri.getScheme().toLowerCase());
-
-            sb.a("://");
-
-            if (uri.getAuthority() != null)
-                sb.a(uri.getAuthority().toLowerCase());
-
-            return sb.toString();
-        }
-
-        /**
-         * The URI.
-         */
-        public URI uri() {
-            return uri;
-        }
-
-        /**
-         * The User.
-         */
-        public String user() {
-            return usr;
-        }
-
-        /**
-         * The Configuration.
-         */
-        public Configuration configuration() {
-            return cfg;
-        }
-
-        /** {@inheritDoc} */
-        @SuppressWarnings("SimplifiableIfStatement")
-        @Override public boolean equals(Object obj) {
-            if (obj == this)
-                return true;
-
-            if (obj == null || getClass() != obj.getClass())
-                return false;
-
-            return equalityKey.equals(((FsCacheKey)obj).equalityKey);
-        }
-
-        /** {@inheritDoc} */
-        @Override public int hashCode() {
-            return equalityKey.hashCode();
-        }
-
-        /** {@inheritDoc} */
-        @Override public String toString() {
-            return equalityKey;
-        }
-    }
-}
\ No newline at end of file
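
Two details of the file system cache above are easy to miss: the value factory explicitly disables Hadoop's own FileSystem cache for the key's scheme, so this map fully owns the instances, and FsCacheKey normalizes (uri, user) into the string "(user)@scheme://authority", ignoring the path and lower-casing scheme and authority, while the Configuration only seeds the first instance created for a key. A standalone sketch of that key normalization, with a plain StringBuilder standing in for GridStringBuilder:

    import java.net.URI;

    public class FsKeyExample {
        /** Builds the "(user)@scheme://authority" equality key used by FsCacheKey. */
        static String equalityKey(URI uri, String user) {
            StringBuilder sb = new StringBuilder("(").append(user).append(")@");

            if (uri.getScheme() != null)
                sb.append(uri.getScheme().toLowerCase());

            sb.append("://");

            if (uri.getAuthority() != null)
                sb.append(uri.getAuthority().toLowerCase());

            return sb.toString();
        }

        public static void main(String[] args) {
            URI a = URI.create("HDFS://NameNode:9000/tmp");
            URI b = URI.create("hdfs://namenode:9000/other/path");

            // Same key: the path is ignored, scheme and authority compare case-insensitively.
            System.out.println(equalityKey(a, "ivan").equals(equalityKey(b, "ivan"))); // true
        }
    }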


[24/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/books/tom-sawyer.txt
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/books/tom-sawyer.txt b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/books/tom-sawyer.txt
new file mode 100644
index 0000000..1806925
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/books/tom-sawyer.txt
@@ -0,0 +1,8858 @@
+The Project Gutenberg EBook of The Adventures of Tom Sawyer, Complete
+by Mark Twain (Samuel Clemens)
+
+This eBook is for the use of anyone anywhere at no cost and with
+almost no restrictions whatsoever.  You may copy it, give it away or
+re-use it under the terms of the Project Gutenberg License included
+with this eBook or online at www.gutenberg.net
+
+
+Title: The Adventures of Tom Sawyer, Complete
+
+Author: Mark Twain (Samuel Clemens)
+
+Release Date: August 20, 2006 [EBook #74]
+[Last updated: May 3, 2011]
+
+Language: English
+
+
+*** START OF THIS PROJECT GUTENBERG EBOOK TOM SAWYER ***
+
+
+
+
+Produced by David Widger. The previous edition was updated by Jose
+Menendez.
+
+
+
+
+
+                   THE ADVENTURES OF TOM SAWYER
+                                BY
+                            MARK TWAIN
+                     (Samuel Langhorne Clemens)
+
+
+
+
+                           P R E F A C E
+
+MOST of the adventures recorded in this book really occurred; one or
+two were experiences of my own, the rest those of boys who were
+schoolmates of mine. Huck Finn is drawn from life; Tom Sawyer also, but
+not from an individual--he is a combination of the characteristics of
+three boys whom I knew, and therefore belongs to the composite order of
+architecture.
+
+The odd superstitions touched upon were all prevalent among children
+and slaves in the West at the period of this story--that is to say,
+thirty or forty years ago.
+
+Although my book is intended mainly for the entertainment of boys and
+girls, I hope it will not be shunned by men and women on that account,
+for part of my plan has been to try to pleasantly remind adults of what
+they once were themselves, and of how they felt and thought and talked,
+and what queer enterprises they sometimes engaged in.
+
+                                                            THE AUTHOR.
+
+HARTFORD, 1876.
+
+
+
+                          T O M   S A W Y E R
+
+
+
+CHAPTER I
+
+"TOM!"
+
+No answer.
+
+"TOM!"
+
+No answer.
+
+"What's gone with that boy,  I wonder? You TOM!"
+
+No answer.
+
+The old lady pulled her spectacles down and looked over them about the
+room; then she put them up and looked out under them. She seldom or
+never looked THROUGH them for so small a thing as a boy; they were her
+state pair, the pride of her heart, and were built for "style," not
+service--she could have seen through a pair of stove-lids just as well.
+She looked perplexed for a moment, and then said, not fiercely, but
+still loud enough for the furniture to hear:
+
+"Well, I lay if I get hold of you I'll--"
+
+She did not finish, for by this time she was bending down and punching
+under the bed with the broom, and so she needed breath to punctuate the
+punches with. She resurrected nothing but the cat.
+
+"I never did see the beat of that boy!"
+
+She went to the open door and stood in it and looked out among the
+tomato vines and "jimpson" weeds that constituted the garden. No Tom.
+So she lifted up her voice at an angle calculated for distance and
+shouted:
+
+"Y-o-u-u TOM!"
+
+There was a slight noise behind her and she turned just in time to
+seize a small boy by the slack of his roundabout and arrest his flight.
+
+"There! I might 'a' thought of that closet. What you been doing in
+there?"
+
+"Nothing."
+
+"Nothing! Look at your hands. And look at your mouth. What IS that
+truck?"
+
+"I don't know, aunt."
+
+"Well, I know. It's jam--that's what it is. Forty times I've said if
+you didn't let that jam alone I'd skin you. Hand me that switch."
+
+The switch hovered in the air--the peril was desperate--
+
+"My! Look behind you, aunt!"
+
+The old lady whirled round, and snatched her skirts out of danger. The
+lad fled on the instant, scrambled up the high board-fence, and
+disappeared over it.
+
+His aunt Polly stood surprised a moment, and then broke into a gentle
+laugh.
+
+"Hang the boy, can't I never learn anything? Ain't he played me tricks
+enough like that for me to be looking out for him by this time? But old
+fools is the biggest fools there is. Can't learn an old dog new tricks,
+as the saying is. But my goodness, he never plays them alike, two days,
+and how is a body to know what's coming? He 'pears to know just how
+long he can torment me before I get my dander up, and he knows if he
+can make out to put me off for a minute or make me laugh, it's all down
+again and I can't hit him a lick. I ain't doing my duty by that boy,
+and that's the Lord's truth, goodness knows. Spare the rod and spile
+the child, as the Good Book says. I'm a laying up sin and suffering for
+us both, I know. He's full of the Old Scratch, but laws-a-me! he's my
+own dead sister's boy, poor thing, and I ain't got the heart to lash
+him, somehow. Every time I let him off, my conscience does hurt me so,
+and every time I hit him my old heart most breaks. Well-a-well, man
+that is born of woman is of few days and full of trouble, as the
+Scripture says, and I reckon it's so. He'll play hookey this evening, *
+and [* Southwestern for "afternoon"] I'll just be obleeged to make him
+work, to-morrow, to punish him. It's mighty hard to make him work
+Saturdays, when all the boys is having holiday, but he hates work more
+than he hates anything else, and I've GOT to do some of my duty by him,
+or I'll be the ruination of the child."
+
+Tom did play hookey, and he had a very good time. He got back home
+barely in season to help Jim, the small colored boy, saw next-day's
+wood and split the kindlings before supper--at least he was there in
+time to tell his adventures to Jim while Jim did three-fourths of the
+work. Tom's younger brother (or rather half-brother) Sid was already
+through with his part of the work (picking up chips), for he was a
+quiet boy, and had no adventurous, troublesome ways.
+
+While Tom was eating his supper, and stealing sugar as opportunity
+offered, Aunt Polly asked him questions that were full of guile, and
+very deep--for she wanted to trap him into damaging revealments. Like
+many other simple-hearted souls, it was her pet vanity to believe she
+was endowed with a talent for dark and mysterious diplomacy, and she
+loved to contemplate her most transparent devices as marvels of low
+cunning. Said she:
+
+"Tom, it was middling warm in school, warn't it?"
+
+"Yes'm."
+
+"Powerful warm, warn't it?"
+
+"Yes'm."
+
+"Didn't you want to go in a-swimming, Tom?"
+
+A bit of a scare shot through Tom--a touch of uncomfortable suspicion.
+He searched Aunt Polly's face, but it told him nothing. So he said:
+
+"No'm--well, not very much."
+
+The old lady reached out her hand and felt Tom's shirt, and said:
+
+"But you ain't too warm now, though." And it flattered her to reflect
+that she had discovered that the shirt was dry without anybody knowing
+that that was what she had in her mind. But in spite of her, Tom knew
+where the wind lay, now. So he forestalled what might be the next move:
+
+"Some of us pumped on our heads--mine's damp yet. See?"
+
+Aunt Polly was vexed to think she had overlooked that bit of
+circumstantial evidence, and missed a trick. Then she had a new
+inspiration:
+
+"Tom, you didn't have to undo your shirt collar where I sewed it, to
+pump on your head, did you? Unbutton your jacket!"
+
+The trouble vanished out of Tom's face. He opened his jacket. His
+shirt collar was securely sewed.
+
+"Bother! Well, go 'long with you. I'd made sure you'd played hookey
+and been a-swimming. But I forgive ye, Tom. I reckon you're a kind of a
+singed cat, as the saying is--better'n you look. THIS time."
+
+She was half sorry her sagacity had miscarried, and half glad that Tom
+had stumbled into obedient conduct for once.
+
+But Sidney said:
+
+"Well, now, if I didn't think you sewed his collar with white thread,
+but it's black."
+
+"Why, I did sew it with white! Tom!"
+
+But Tom did not wait for the rest. As he went out at the door he said:
+
+"Siddy, I'll lick you for that."
+
+In a safe place Tom examined two large needles which were thrust into
+the lapels of his jacket, and had thread bound about them--one needle
+carried white thread and the other black. He said:
+
+"She'd never noticed if it hadn't been for Sid. Confound it! sometimes
+she sews it with white, and sometimes she sews it with black. I wish to
+geeminy she'd stick to one or t'other--I can't keep the run of 'em. But
+I bet you I'll lam Sid for that. I'll learn him!"
+
+He was not the Model Boy of the village. He knew the model boy very
+well though--and loathed him.
+
+Within two minutes, or even less, he had forgotten all his troubles.
+Not because his troubles were one whit less heavy and bitter to him
+than a man's are to a man, but because a new and powerful interest bore
+them down and drove them out of his mind for the time--just as men's
+misfortunes are forgotten in the excitement of new enterprises. This
+new interest was a valued novelty in whistling, which he had just
+acquired from a negro, and he was suffering to practise it undisturbed.
+It consisted in a peculiar bird-like turn, a sort of liquid warble,
+produced by touching the tongue to the roof of the mouth at short
+intervals in the midst of the music--the reader probably remembers how
+to do it, if he has ever been a boy. Diligence and attention soon gave
+him the knack of it, and he strode down the street with his mouth full
+of harmony and his soul full of gratitude. He felt much as an
+astronomer feels who has discovered a new planet--no doubt, as far as
+strong, deep, unalloyed pleasure is concerned, the advantage was with
+the boy, not the astronomer.
+
+The summer evenings were long. It was not dark, yet. Presently Tom
+checked his whistle. A stranger was before him--a boy a shade larger
+than himself. A new-comer of any age or either sex was an impressive
+curiosity in the poor little shabby village of St. Petersburg. This boy
+was well dressed, too--well dressed on a week-day. This was simply
+astounding. His cap was a dainty thing, his close-buttoned blue cloth
+roundabout was new and natty, and so were his pantaloons. He had shoes
+on--and it was only Friday. He even wore a necktie, a bright bit of
+ribbon. He had a citified air about him that ate into Tom's vitals. The
+more Tom stared at the splendid marvel, the higher he turned up his
+nose at his finery and the shabbier and shabbier his own outfit seemed
+to him to grow. Neither boy spoke. If one moved, the other moved--but
+only sidewise, in a circle; they kept face to face and eye to eye all
+the time. Finally Tom said:
+
+"I can lick you!"
+
+"I'd like to see you try it."
+
+"Well, I can do it."
+
+"No you can't, either."
+
+"Yes I can."
+
+"No you can't."
+
+"I can."
+
+"You can't."
+
+"Can!"
+
+"Can't!"
+
+An uncomfortable pause. Then Tom said:
+
+"What's your name?"
+
+"'Tisn't any of your business, maybe."
+
+"Well I 'low I'll MAKE it my business."
+
+"Well why don't you?"
+
+"If you say much, I will."
+
+"Much--much--MUCH. There now."
+
+"Oh, you think you're mighty smart, DON'T you? I could lick you with
+one hand tied behind me, if I wanted to."
+
+"Well why don't you DO it? You SAY you can do it."
+
+"Well I WILL, if you fool with me."
+
+"Oh yes--I've seen whole families in the same fix."
+
+"Smarty! You think you're SOME, now, DON'T you? Oh, what a hat!"
+
+"You can lump that hat if you don't like it. I dare you to knock it
+off--and anybody that'll take a dare will suck eggs."
+
+"You're a liar!"
+
+"You're another."
+
+"You're a fighting liar and dasn't take it up."
+
+"Aw--take a walk!"
+
+"Say--if you give me much more of your sass I'll take and bounce a
+rock off'n your head."
+
+"Oh, of COURSE you will."
+
+"Well I WILL."
+
+"Well why don't you DO it then? What do you keep SAYING you will for?
+Why don't you DO it? It's because you're afraid."
+
+"I AIN'T afraid."
+
+"You are."
+
+"I ain't."
+
+"You are."
+
+Another pause, and more eying and sidling around each other. Presently
+they were shoulder to shoulder. Tom said:
+
+"Get away from here!"
+
+"Go away yourself!"
+
+"I won't."
+
+"I won't either."
+
+So they stood, each with a foot placed at an angle as a brace, and
+both shoving with might and main, and glowering at each other with
+hate. But neither could get an advantage. After struggling till both
+were hot and flushed, each relaxed his strain with watchful caution,
+and Tom said:
+
+"You're a coward and a pup. I'll tell my big brother on you, and he
+can thrash you with his little finger, and I'll make him do it, too."
+
+"What do I care for your big brother? I've got a brother that's bigger
+than he is--and what's more, he can throw him over that fence, too."
+[Both brothers were imaginary.]
+
+"That's a lie."
+
+"YOUR saying so don't make it so."
+
+Tom drew a line in the dust with his big toe, and said:
+
+"I dare you to step over that, and I'll lick you till you can't stand
+up. Anybody that'll take a dare will steal sheep."
+
+The new boy stepped over promptly, and said:
+
+"Now you said you'd do it, now let's see you do it."
+
+"Don't you crowd me now; you better look out."
+
+"Well, you SAID you'd do it--why don't you do it?"
+
+"By jingo! for two cents I WILL do it."
+
+The new boy took two broad coppers out of his pocket and held them out
+with derision. Tom struck them to the ground. In an instant both boys
+were rolling and tumbling in the dirt, gripped together like cats; and
+for the space of a minute they tugged and tore at each other's hair and
+clothes, punched and scratched each other's nose, and covered
+themselves with dust and glory. Presently the confusion took form, and
+through the fog of battle Tom appeared, seated astride the new boy, and
+pounding him with his fists. "Holler 'nuff!" said he.
+
+The boy only struggled to free himself. He was crying--mainly from rage.
+
+"Holler 'nuff!"--and the pounding went on.
+
+At last the stranger got out a smothered "'Nuff!" and Tom let him up
+and said:
+
+"Now that'll learn you. Better look out who you're fooling with next
+time."
+
+The new boy went off brushing the dust from his clothes, sobbing,
+snuffling, and occasionally looking back and shaking his head and
+threatening what he would do to Tom the "next time he caught him out."
+To which Tom responded with jeers, and started off in high feather, and
+as soon as his back was turned the new boy snatched up a stone, threw
+it and hit him between the shoulders and then turned tail and ran like
+an antelope. Tom chased the traitor home, and thus found out where he
+lived. He then held a position at the gate for some time, daring the
+enemy to come outside, but the enemy only made faces at him through the
+window and declined. At last the enemy's mother appeared, and called
+Tom a bad, vicious, vulgar child, and ordered him away. So he went
+away; but he said he "'lowed" to "lay" for that boy.
+
+He got home pretty late that night, and when he climbed cautiously in
+at the window, he uncovered an ambuscade, in the person of his aunt;
+and when she saw the state his clothes were in her resolution to turn
+his Saturday holiday into captivity at hard labor became adamantine in
+its firmness.
+
+
+
+CHAPTER II
+
+SATURDAY morning was come, and all the summer world was bright and
+fresh, and brimming with life. There was a song in every heart; and if
+the heart was young the music issued at the lips. There was cheer in
+every face and a spring in every step. The locust-trees were in bloom
+and the fragrance of the blossoms filled the air. Cardiff Hill, beyond
+the village and above it, was green with vegetation and it lay just far
+enough away to seem a Delectable Land, dreamy, reposeful, and inviting.
+
+Tom appeared on the sidewalk with a bucket of whitewash and a
+long-handled brush. He surveyed the fence, and all gladness left him and
+a deep melancholy settled down upon his spirit. Thirty yards of board
+fence nine feet high. Life to him seemed hollow, and existence but a
+burden. Sighing, he dipped his brush and passed it along the topmost
+plank; repeated the operation; did it again; compared the insignificant
+whitewashed streak with the far-reaching continent of unwhitewashed
+fence, and sat down on a tree-box discouraged. Jim came skipping out at
+the gate with a tin pail, and singing Buffalo Gals. Bringing water from
+the town pump had always been hateful work in Tom's eyes, before, but
+now it did not strike him so. He remembered that there was company at
+the pump. White, mulatto, and negro boys and girls were always there
+waiting their turns, resting, trading playthings, quarrelling,
+fighting, skylarking. And he remembered that although the pump was only
+a hundred and fifty yards off, Jim never got back with a bucket of
+water under an hour--and even then somebody generally had to go after
+him. Tom said:
+
+"Say, Jim, I'll fetch the water if you'll whitewash some."
+
+Jim shook his head and said:
+
+"Can't, Mars Tom. Ole missis, she tole me I got to go an' git dis
+water an' not stop foolin' roun' wid anybody. She say she spec' Mars
+Tom gwine to ax me to whitewash, an' so she tole me go 'long an' 'tend
+to my own business--she 'lowed SHE'D 'tend to de whitewashin'."
+
+"Oh, never you mind what she said, Jim. That's the way she always
+talks. Gimme the bucket--I won't be gone only a a minute. SHE won't
+ever know."
+
+"Oh, I dasn't, Mars Tom. Ole missis she'd take an' tar de head off'n
+me. 'Deed she would."
+
+"SHE! She never licks anybody--whacks 'em over the head with her
+thimble--and who cares for that, I'd like to know. She talks awful, but
+talk don't hurt--anyways it don't if she don't cry. Jim, I'll give you
+a marvel. I'll give you a white alley!"
+
+Jim began to waver.
+
+"White alley, Jim! And it's a bully taw."
+
+"My! Dat's a mighty gay marvel, I tell you! But Mars Tom I's powerful
+'fraid ole missis--"
+
+"And besides, if you will I'll show you my sore toe."
+
+Jim was only human--this attraction was too much for him. He put down
+his pail, took the white alley, and bent over the toe with absorbing
+interest while the bandage was being unwound. In another moment he was
+flying down the street with his pail and a tingling rear, Tom was
+whitewashing with vigor, and Aunt Polly was retiring from the field
+with a slipper in her hand and triumph in her eye.
+
+But Tom's energy did not last. He began to think of the fun he had
+planned for this day, and his sorrows multiplied. Soon the free boys
+would come tripping along on all sorts of delicious expeditions, and
+they would make a world of fun of him for having to work--the very
+thought of it burnt him like fire. He got out his worldly wealth and
+examined it--bits of toys, marbles, and trash; enough to buy an
+exchange of WORK, maybe, but not half enough to buy so much as half an
+hour of pure freedom. So he returned his straitened means to his
+pocket, and gave up the idea of trying to buy the boys. At this dark
+and hopeless moment an inspiration burst upon him! Nothing less than a
+great, magnificent inspiration.
+
+He took up his brush and went tranquilly to work. Ben Rogers hove in
+sight presently--the very boy, of all boys, whose ridicule he had been
+dreading. Ben's gait was the hop-skip-and-jump--proof enough that his
+heart was light and his anticipations high. He was eating an apple, and
+giving a long, melodious whoop, at intervals, followed by a deep-toned
+ding-dong-dong, ding-dong-dong, for he was personating a steamboat. As
+he drew near, he slackened speed, took the middle of the street, leaned
+far over to starboard and rounded to ponderously and with laborious
+pomp and circumstance--for he was personating the Big Missouri, and
+considered himself to be drawing nine feet of water. He was boat and
+captain and engine-bells combined, so he had to imagine himself
+standing on his own hurricane-deck giving the orders and executing them:
+
+"Stop her, sir! Ting-a-ling-ling!" The headway ran almost out, and he
+drew up slowly toward the sidewalk.
+
+"Ship up to back! Ting-a-ling-ling!" His arms straightened and
+stiffened down his sides.
+
+"Set her back on the stabboard! Ting-a-ling-ling! Chow! ch-chow-wow!
+Chow!" His right hand, meantime, describing stately circles--for it was
+representing a forty-foot wheel.
+
+"Let her go back on the labboard! Ting-a-lingling! Chow-ch-chow-chow!"
+The left hand began to describe circles.
+
+"Stop the stabboard! Ting-a-ling-ling! Stop the labboard! Come ahead
+on the stabboard! Stop her! Let your outside turn over slow!
+Ting-a-ling-ling! Chow-ow-ow! Get out that head-line! LIVELY now!
+Come--out with your spring-line--what're you about there! Take a turn
+round that stump with the bight of it! Stand by that stage, now--let her
+go! Done with the engines, sir! Ting-a-ling-ling! SH'T! S'H'T! SH'T!"
+(trying the gauge-cocks).
+
+Tom went on whitewashing--paid no attention to the steamboat. Ben
+stared a moment and then said: "Hi-YI! YOU'RE up a stump, ain't you!"
+
+No answer. Tom surveyed his last touch with the eye of an artist, then
+he gave his brush another gentle sweep and surveyed the result, as
+before. Ben ranged up alongside of him. Tom's mouth watered for the
+apple, but he stuck to his work. Ben said:
+
+"Hello, old chap, you got to work, hey?"
+
+Tom wheeled suddenly and said:
+
+"Why, it's you, Ben! I warn't noticing."
+
+"Say--I'm going in a-swimming, I am. Don't you wish you could? But of
+course you'd druther WORK--wouldn't you? Course you would!"
+
+Tom contemplated the boy a bit, and said:
+
+"What do you call work?"
+
+"Why, ain't THAT work?"
+
+Tom resumed his whitewashing, and answered carelessly:
+
+"Well, maybe it is, and maybe it ain't. All I know, is, it suits Tom
+Sawyer."
+
+"Oh come, now, you don't mean to let on that you LIKE it?"
+
+The brush continued to move.
+
+"Like it? Well, I don't see why I oughtn't to like it. Does a boy get
+a chance to whitewash a fence every day?"
+
+That put the thing in a new light. Ben stopped nibbling his apple. Tom
+swept his brush daintily back and forth--stepped back to note the
+effect--added a touch here and there--criticised the effect again--Ben
+watching every move and getting more and more interested, more and more
+absorbed. Presently he said:
+
+"Say, Tom, let ME whitewash a little."
+
+Tom considered, was about to consent; but he altered his mind:
+
+"No--no--I reckon it wouldn't hardly do, Ben. You see, Aunt Polly's
+awful particular about this fence--right here on the street, you know
+--but if it was the back fence I wouldn't mind and SHE wouldn't. Yes,
+she's awful particular about this fence; it's got to be done very
+careful; I reckon there ain't one boy in a thousand, maybe two
+thousand, that can do it the way it's got to be done."
+
+"No--is that so? Oh come, now--lemme just try. Only just a little--I'd
+let YOU, if you was me, Tom."
+
+"Ben, I'd like to, honest injun; but Aunt Polly--well, Jim wanted to
+do it, but she wouldn't let him; Sid wanted to do it, and she wouldn't
+let Sid. Now don't you see how I'm fixed? If you was to tackle this
+fence and anything was to happen to it--"
+
+"Oh, shucks, I'll be just as careful. Now lemme try. Say--I'll give
+you the core of my apple."
+
+"Well, here--No, Ben, now don't. I'm afeard--"
+
+"I'll give you ALL of it!"
+
+Tom gave up the brush with reluctance in his face, but alacrity in his
+heart. And while the late steamer Big Missouri worked and sweated in
+the sun, the retired artist sat on a barrel in the shade close by,
+dangled his legs, munched his apple, and planned the slaughter of more
+innocents. There was no lack of material; boys happened along every
+little while; they came to jeer, but remained to whitewash. By the time
+Ben was fagged out, Tom had traded the next chance to Billy Fisher for
+a kite, in good repair; and when he played out, Johnny Miller bought in
+for a dead rat and a string to swing it with--and so on, and so on,
+hour after hour. And when the middle of the afternoon came, from being
+a poor poverty-stricken boy in the morning, Tom was literally rolling
+in wealth. He had besides the things before mentioned, twelve marbles,
+part of a jews-harp, a piece of blue bottle-glass to look through, a
+spool cannon, a key that wouldn't unlock anything, a fragment of chalk,
+a glass stopper of a decanter, a tin soldier, a couple of tadpoles, six
+fire-crackers, a kitten with only one eye, a brass doorknob, a
+dog-collar--but no dog--the handle of a knife, four pieces of
+orange-peel, and a dilapidated old window sash.
+
+He had had a nice, good, idle time all the while--plenty of company
+--and the fence had three coats of whitewash on it! If he hadn't run out
+of whitewash he would have bankrupted every boy in the village.
+
+Tom said to himself that it was not such a hollow world, after all. He
+had discovered a great law of human action, without knowing it--namely,
+that in order to make a man or a boy covet a thing, it is only
+necessary to make the thing difficult to attain. If he had been a great
+and wise philosopher, like the writer of this book, he would now have
+comprehended that Work consists of whatever a body is OBLIGED to do,
+and that Play consists of whatever a body is not obliged to do. And
+this would help him to understand why constructing artificial flowers
+or performing on a tread-mill is work, while rolling ten-pins or
+climbing Mont Blanc is only amusement. There are wealthy gentlemen in
+England who drive four-horse passenger-coaches twenty or thirty miles
+on a daily line, in the summer, because the privilege costs them
+considerable money; but if they were offered wages for the service,
+that would turn it into work and then they would resign.
+
+The boy mused awhile over the substantial change which had taken place
+in his worldly circumstances, and then wended toward headquarters to
+report.
+
+
+
+CHAPTER III
+
+TOM presented himself before Aunt Polly, who was sitting by an open
+window in a pleasant rearward apartment, which was bedroom,
+breakfast-room, dining-room, and library, combined. The balmy summer
+air, the restful quiet, the odor of the flowers, and the drowsing murmur
+of the bees had had their effect, and she was nodding over her knitting
+--for she had no company but the cat, and it was asleep in her lap. Her
+spectacles were propped up on her gray head for safety. She had thought
+that of course Tom had deserted long ago, and she wondered at seeing him
+place himself in her power again in this intrepid way. He said: "Mayn't
+I go and play now, aunt?"
+
+"What, a'ready? How much have you done?"
+
+"It's all done, aunt."
+
+"Tom, don't lie to me--I can't bear it."
+
+"I ain't, aunt; it IS all done."
+
+Aunt Polly placed small trust in such evidence. She went out to see
+for herself; and she would have been content to find twenty per cent.
+of Tom's statement true. When she found the entire fence whitewashed,
+and not only whitewashed but elaborately coated and recoated, and even
+a streak added to the ground, her astonishment was almost unspeakable.
+She said:
+
+"Well, I never! There's no getting round it, you can work when you're
+a mind to, Tom." And then she diluted the compliment by adding, "But
+it's powerful seldom you're a mind to, I'm bound to say. Well, go 'long
+and play; but mind you get back some time in a week, or I'll tan you."
+
+She was so overcome by the splendor of his achievement that she took
+him into the closet and selected a choice apple and delivered it to
+him, along with an improving lecture upon the added value and flavor a
+treat took to itself when it came without sin through virtuous effort.
+And while she closed with a happy Scriptural flourish, he "hooked" a
+doughnut.
+
+Then he skipped out, and saw Sid just starting up the outside stairway
+that led to the back rooms on the second floor. Clods were handy and
+the air was full of them in a twinkling. They raged around Sid like a
+hail-storm; and before Aunt Polly could collect her surprised faculties
+and sally to the rescue, six or seven clods had taken personal effect,
+and Tom was over the fence and gone. There was a gate, but as a general
+thing he was too crowded for time to make use of it. His soul was at
+peace, now that he had settled with Sid for calling attention to his
+black thread and getting him into trouble.
+
+Tom skirted the block, and came round into a muddy alley that led by
+the back of his aunt's cow-stable. He presently got safely beyond the
+reach of capture and punishment, and hastened toward the public square
+of the village, where two "military" companies of boys had met for
+conflict, according to previous appointment. Tom was General of one of
+these armies, Joe Harper (a bosom friend) General of the other. These
+two great commanders did not condescend to fight in person--that being
+better suited to the still smaller fry--but sat together on an eminence
+and conducted the field operations by orders delivered through
+aides-de-camp. Tom's army won a great victory, after a long and
+hard-fought battle. Then the dead were counted, prisoners exchanged,
+the terms of the next disagreement agreed upon, and the day for the
+necessary battle appointed; after which the armies fell into line and
+marched away, and Tom turned homeward alone.
+
+As he was passing by the house where Jeff Thatcher lived, he saw a new
+girl in the garden--a lovely little blue-eyed creature with yellow hair
+plaited into two long-tails, white summer frock and embroidered
+pantalettes. The fresh-crowned hero fell without firing a shot. A
+certain Amy Lawrence vanished out of his heart and left not even a
+memory of herself behind. He had thought he loved her to distraction;
+he had regarded his passion as adoration; and behold it was only a poor
+little evanescent partiality. He had been months winning her; she had
+confessed hardly a week ago; he had been the happiest and the proudest
+boy in the world only seven short days, and here in one instant of time
+she had gone out of his heart like a casual stranger whose visit is
+done.
+
+He worshipped this new angel with furtive eye, till he saw that she
+had discovered him; then he pretended he did not know she was present,
+and began to "show off" in all sorts of absurd boyish ways, in order to
+win her admiration. He kept up this grotesque foolishness for some
+time; but by-and-by, while he was in the midst of some dangerous
+gymnastic performances, he glanced aside and saw that the little girl
+was wending her way toward the house. Tom came up to the fence and
+leaned on it, grieving, and hoping she would tarry yet awhile longer.
+She halted a moment on the steps and then moved toward the door. Tom
+heaved a great sigh as she put her foot on the threshold. But his face
+lit up, right away, for she tossed a pansy over the fence a moment
+before she disappeared.
+
+The boy ran around and stopped within a foot or two of the flower, and
+then shaded his eyes with his hand and began to look down street as if
+he had discovered something of interest going on in that direction.
+Presently he picked up a straw and began trying to balance it on his
+nose, with his head tilted far back; and as he moved from side to side,
+in his efforts, he edged nearer and nearer toward the pansy; finally
+his bare foot rested upon it, his pliant toes closed upon it, and he
+hopped away with the treasure and disappeared round the corner. But
+only for a minute--only while he could button the flower inside his
+jacket, next his heart--or next his stomach, possibly, for he was not
+much posted in anatomy, and not hypercritical, anyway.
+
+He returned, now, and hung about the fence till nightfall, "showing
+off," as before; but the girl never exhibited herself again, though Tom
+comforted himself a little with the hope that she had been near some
+window, meantime, and been aware of his attentions. Finally he strode
+home reluctantly, with his poor head full of visions.
+
+All through supper his spirits were so high that his aunt wondered
+"what had got into the child." He took a good scolding about clodding
+Sid, and did not seem to mind it in the least. He tried to steal sugar
+under his aunt's very nose, and got his knuckles rapped for it. He said:
+
+"Aunt, you don't whack Sid when he takes it."
+
+"Well, Sid don't torment a body the way you do. You'd be always into
+that sugar if I warn't watching you."
+
+Presently she stepped into the kitchen, and Sid, happy in his
+immunity, reached for the sugar-bowl--a sort of glorying over Tom which
+was wellnigh unbearable. But Sid's fingers slipped and the bowl dropped
+and broke. Tom was in ecstasies. In such ecstasies that he even
+controlled his tongue and was silent. He said to himself that he would
+not speak a word, even when his aunt came in, but would sit perfectly
+still till she asked who did the mischief; and then he would tell, and
+there would be nothing so good in the world as to see that pet model
+"catch it." He was so brimful of exultation that he could hardly hold
+himself when the old lady came back and stood above the wreck
+discharging lightnings of wrath from over her spectacles. He said to
+himself, "Now it's coming!" And the next instant he was sprawling on
+the floor! The potent palm was uplifted to strike again when Tom cried
+out:
+
+"Hold on, now, what 'er you belting ME for?--Sid broke it!"
+
+Aunt Polly paused, perplexed, and Tom looked for healing pity. But
+when she got her tongue again, she only said:
+
+"Umf! Well, you didn't get a lick amiss, I reckon. You been into some
+other audacious mischief when I wasn't around, like enough."
+
+Then her conscience reproached her, and she yearned to say something
+kind and loving; but she judged that this would be construed into a
+confession that she had been in the wrong, and discipline forbade that.
+So she kept silence, and went about her affairs with a troubled heart.
+Tom sulked in a corner and exalted his woes. He knew that in her heart
+his aunt was on her knees to him, and he was morosely gratified by the
+consciousness of it. He would hang out no signals, he would take notice
+of none. He knew that a yearning glance fell upon him, now and then,
+through a film of tears, but he refused recognition of it. He pictured
+himself lying sick unto death and his aunt bending over him beseeching
+one little forgiving word, but he would turn his face to the wall, and
+die with that word unsaid. Ah, how would she feel then? And he pictured
+himself brought home from the river, dead, with his curls all wet, and
+his sore heart at rest. How she would throw herself upon him, and how
+her tears would fall like rain, and her lips pray God to give her back
+her boy and she would never, never abuse him any more! But he would lie
+there cold and white and make no sign--a poor little sufferer, whose
+griefs were at an end. He so worked upon his feelings with the pathos
+of these dreams, that he had to keep swallowing, he was so like to
+choke; and his eyes swam in a blur of water, which overflowed when he
+winked, and ran down and trickled from the end of his nose. And such a
+luxury to him was this petting of his sorrows, that he could not bear
+to have any worldly cheeriness or any grating delight intrude upon it;
+it was too sacred for such contact; and so, presently, when his cousin
+Mary danced in, all alive with the joy of seeing home again after an
+age-long visit of one week to the country, he got up and moved in
+clouds and darkness out at one door as she brought song and sunshine in
+at the other.
+
+He wandered far from the accustomed haunts of boys, and sought
+desolate places that were in harmony with his spirit. A log raft in the
+river invited him, and he seated himself on its outer edge and
+contemplated the dreary vastness of the stream, wishing, the while,
+that he could only be drowned, all at once and unconsciously, without
+undergoing the uncomfortable routine devised by nature. Then he thought
+of his flower. He got it out, rumpled and wilted, and it mightily
+increased his dismal felicity. He wondered if she would pity him if she
+knew? Would she cry, and wish that she had a right to put her arms
+around his neck and comfort him? Or would she turn coldly away like all
+the hollow world? This picture brought such an agony of pleasurable
+suffering that he worked it over and over again in his mind and set it
+up in new and varied lights, till he wore it threadbare. At last he
+rose up sighing and departed in the darkness.
+
+About half-past nine or ten o'clock he came along the deserted street
+to where the Adored Unknown lived; he paused a moment; no sound fell
+upon his listening ear; a candle was casting a dull glow upon the
+curtain of a second-story window. Was the sacred presence there? He
+climbed the fence, threaded his stealthy way through the plants, till
+he stood under that window; he looked up at it long, and with emotion;
+then he laid him down on the ground under it, disposing himself upon
+his back, with his hands clasped upon his breast and holding his poor
+wilted flower. And thus he would die--out in the cold world, with no
+shelter over his homeless head, no friendly hand to wipe the
+death-damps from his brow, no loving face to bend pityingly over him
+when the great agony came. And thus SHE would see him when she looked
+out upon the glad morning, and oh! would she drop one little tear upon
+his poor, lifeless form, would she heave one little sigh to see a bright
+young life so rudely blighted, so untimely cut down?
+
+The window went up, a maid-servant's discordant voice profaned the
+holy calm, and a deluge of water drenched the prone martyr's remains!
+
+The strangling hero sprang up with a relieving snort. There was a whiz
+as of a missile in the air, mingled with the murmur of a curse, a sound
+as of shivering glass followed, and a small, vague form went over the
+fence and shot away in the gloom.
+
+Not long after, as Tom, all undressed for bed, was surveying his
+drenched garments by the light of a tallow dip, Sid woke up; but if he
+had any dim idea of making any "references to allusions," he thought
+better of it and held his peace, for there was danger in Tom's eye.
+
+Tom turned in without the added vexation of prayers, and Sid made
+mental note of the omission.
+
+
+
+CHAPTER IV
+
+THE sun rose upon a tranquil world, and beamed down upon the peaceful
+village like a benediction. Breakfast over, Aunt Polly had family
+worship: it began with a prayer built from the ground up of solid
+courses of Scriptural quotations, welded together with a thin mortar of
+originality; and from the summit of this she delivered a grim chapter
+of the Mosaic Law, as from Sinai.
+
+Then Tom girded up his loins, so to speak, and went to work to "get
+his verses." Sid had learned his lesson days before. Tom bent all his
+energies to the memorizing of five verses, and he chose part of the
+Sermon on the Mount, because he could find no verses that were shorter.
+At the end of half an hour Tom had a vague general idea of his lesson,
+but no more, for his mind was traversing the whole field of human
+thought, and his hands were busy with distracting recreations. Mary
+took his book to hear him recite, and he tried to find his way through
+the fog:
+
+"Blessed are the--a--a--"
+
+"Poor"--
+
+"Yes--poor; blessed are the poor--a--a--"
+
+"In spirit--"
+
+"In spirit; blessed are the poor in spirit, for they--they--"
+
+"THEIRS--"
+
+"For THEIRS. Blessed are the poor in spirit, for theirs is the kingdom
+of heaven. Blessed are they that mourn, for they--they--"
+
+"Sh--"
+
+"For they--a--"
+
+"S, H, A--"
+
+"For they S, H--Oh, I don't know what it is!"
+
+"SHALL!"
+
+"Oh, SHALL! for they shall--for they shall--a--a--shall mourn--a--a--
+blessed are they that shall--they that--a--they that shall mourn, for
+they shall--a--shall WHAT? Why don't you tell me, Mary?--what do you
+want to be so mean for?"
+
+"Oh, Tom, you poor thick-headed thing, I'm not teasing you. I wouldn't
+do that. You must go and learn it again. Don't you be discouraged, Tom,
+you'll manage it--and if you do, I'll give you something ever so nice.
+There, now, that's a good boy."
+
+"All right! What is it, Mary, tell me what it is."
+
+"Never you mind, Tom. You know if I say it's nice, it is nice."
+
+"You bet you that's so, Mary. All right, I'll tackle it again."
+
+And he did "tackle it again"--and under the double pressure of
+curiosity and prospective gain he did it with such spirit that he
+accomplished a shining success. Mary gave him a brand-new "Barlow"
+knife worth twelve and a half cents; and the convulsion of delight that
+swept his system shook him to his foundations. True, the knife would
+not cut anything, but it was a "sure-enough" Barlow, and there was
+inconceivable grandeur in that--though where the Western boys ever got
+the idea that such a weapon could possibly be counterfeited to its
+injury is an imposing mystery and will always remain so, perhaps. Tom
+contrived to scarify the cupboard with it, and was arranging to begin
+on the bureau, when he was called off to dress for Sunday-school.
+
+Mary gave him a tin basin of water and a piece of soap, and he went
+outside the door and set the basin on a little bench there; then he
+dipped the soap in the water and laid it down; turned up his sleeves;
+poured out the water on the ground, gently, and then entered the
+kitchen and began to wipe his face diligently on the towel behind the
+door. But Mary removed the towel and said:
+
+"Now ain't you ashamed, Tom. You mustn't be so bad. Water won't hurt
+you."
+
+Tom was a trifle disconcerted. The basin was refilled, and this time
+he stood over it a little while, gathering resolution; took in a big
+breath and began. When he entered the kitchen presently, with both eyes
+shut and groping for the towel with his hands, an honorable testimony
+of suds and water was dripping from his face. But when he emerged from
+the towel, he was not yet satisfactory, for the clean territory stopped
+short at his chin and his jaws, like a mask; below and beyond this line
+there was a dark expanse of unirrigated soil that spread downward in
+front and backward around his neck. Mary took him in hand, and when she
+was done with him he was a man and a brother, without distinction of
+color, and his saturated hair was neatly brushed, and its short curls
+wrought into a dainty and symmetrical general effect. [He privately
+smoothed out the curls, with labor and difficulty, and plastered his
+hair close down to his head; for he held curls to be effeminate, and
+his own filled his life with bitterness.] Then Mary got out a suit of
+his clothing that had been used only on Sundays during two years--they
+were simply called his "other clothes"--and so by that we know the
+size of his wardrobe. The girl "put him to rights" after he had dressed
+himself; she buttoned his neat roundabout up to his chin, turned his
+vast shirt collar down over his shoulders, brushed him off and crowned
+him with his speckled straw hat. He now looked exceedingly improved and
+uncomfortable. He was fully as uncomfortable as he looked; for there
+was a restraint about whole clothes and cleanliness that galled him. He
+hoped that Mary would forget his shoes, but the hope was blighted; she
+coated them thoroughly with tallow, as was the custom, and brought them
+out. He lost his temper and said he was always being made to do
+everything he didn't want to do. But Mary said, persuasively:
+
+"Please, Tom--that's a good boy."
+
+So he got into the shoes snarling. Mary was soon ready, and the three
+children set out for Sunday-school--a place that Tom hated with his
+whole heart; but Sid and Mary were fond of it.
+
+Sabbath-school hours were from nine to half-past ten; and then church
+service. Two of the children always remained for the sermon
+voluntarily, and the other always remained too--for stronger reasons.
+The church's high-backed, uncushioned pews would seat about three
+hundred persons; the edifice was but a small, plain affair, with a sort
+of pine board tree-box on top of it for a steeple. At the door Tom
+dropped back a step and accosted a Sunday-dressed comrade:
+
+"Say, Billy, got a yaller ticket?"
+
+"Yes."
+
+"What'll you take for her?"
+
+"What'll you give?"
+
+"Piece of lickrish and a fish-hook."
+
+"Less see 'em."
+
+Tom exhibited. They were satisfactory, and the property changed hands.
+Then Tom traded a couple of white alleys for three red tickets, and
+some small trifle or other for a couple of blue ones. He waylaid other
+boys as they came, and went on buying tickets of various colors ten or
+fifteen minutes longer. He entered the church, now, with a swarm of
+clean and noisy boys and girls, proceeded to his seat and started a
+quarrel with the first boy that came handy. The teacher, a grave,
+elderly man, interfered; then turned his back a moment and Tom pulled a
+boy's hair in the next bench, and was absorbed in his book when the boy
+turned around; stuck a pin in another boy, presently, in order to hear
+him say "Ouch!" and got a new reprimand from his teacher. Tom's whole
+class were of a pattern--restless, noisy, and troublesome. When they
+came to recite their lessons, not one of them knew his verses
+perfectly, but had to be prompted all along. However, they worried
+through, and each got his reward--in small blue tickets, each with a
+passage of Scripture on it; each blue ticket was pay for two verses of
+the recitation. Ten blue tickets equalled a red one, and could be
+exchanged for it; ten red tickets equalled a yellow one; for ten yellow
+tickets the superintendent gave a very plainly bound Bible (worth forty
+cents in those easy times) to the pupil. How many of my readers would
+have the industry and application to memorize two thousand verses, even
+for a Dore Bible? And yet Mary had acquired two Bibles in this way--it
+was the patient work of two years--and a boy of German parentage had
+won four or five. He once recited three thousand verses without
+stopping; but the strain upon his mental faculties was too great, and
+he was little better than an idiot from that day forth--a grievous
+misfortune for the school, for on great occasions, before company, the
+superintendent (as Tom expressed it) had always made this boy come out
+and "spread himself." Only the older pupils managed to keep their
+tickets and stick to their tedious work long enough to get a Bible, and
+so the delivery of one of these prizes was a rare and noteworthy
+circumstance; the successful pupil was so great and conspicuous for
+that day that on the spot every scholar's heart was fired with a fresh
+ambition that often lasted a couple of weeks. It is possible that Tom's
+mental stomach had never really hungered for one of those prizes, but
+unquestionably his entire being had for many a day longed for the glory
+and the eclat that came with it.
+
+In due course the superintendent stood up in front of the pulpit, with
+a closed hymn-book in his hand and his forefinger inserted between its
+leaves, and commanded attention. When a Sunday-school superintendent
+makes his customary little speech, a hymn-book in the hand is as
+necessary as is the inevitable sheet of music in the hand of a singer
+who stands forward on the platform and sings a solo at a concert
+--though why, is a mystery: for neither the hymn-book nor the sheet of
+music is ever referred to by the sufferer. This superintendent was a
+slim creature of thirty-five, with a sandy goatee and short sandy hair;
+he wore a stiff standing-collar whose upper edge almost reached his
+ears and whose sharp points curved forward abreast the corners of his
+mouth--a fence that compelled a straight lookout ahead, and a turning
+of the whole body when a side view was required; his chin was propped
+on a spreading cravat which was as broad and as long as a bank-note,
+and had fringed ends; his boot toes were turned sharply up, in the
+fashion of the day, like sleigh-runners--an effect patiently and
+laboriously produced by the young men by sitting with their toes
+pressed against a wall for hours together. Mr. Walters was very earnest
+of mien, and very sincere and honest at heart; and he held sacred
+things and places in such reverence, and so separated them from worldly
+matters, that unconsciously to himself his Sunday-school voice had
+acquired a peculiar intonation which was wholly absent on week-days. He
+began after this fashion:
+
+"Now, children, I want you all to sit up just as straight and pretty
+as you can and give me all your attention for a minute or two. There
+--that is it. That is the way good little boys and girls should do. I see
+one little girl who is looking out of the window--I am afraid she
+thinks I am out there somewhere--perhaps up in one of the trees making
+a speech to the little birds. [Applausive titter.] I want to tell you
+how good it makes me feel to see so many bright, clean little faces
+assembled in a place like this, learning to do right and be good." And
+so forth and so on. It is not necessary to set down the rest of the
+oration. It was of a pattern which does not vary, and so it is familiar
+to us all.
+
+The latter third of the speech was marred by the resumption of fights
+and other recreations among certain of the bad boys, and by fidgetings
+and whisperings that extended far and wide, washing even to the bases
+of isolated and incorruptible rocks like Sid and Mary. But now every
+sound ceased suddenly, with the subsidence of Mr. Walters' voice, and
+the conclusion of the speech was received with a burst of silent
+gratitude.
+
+A good part of the whispering had been occasioned by an event which
+was more or less rare--the entrance of visitors: lawyer Thatcher,
+accompanied by a very feeble and aged man; a fine, portly, middle-aged
+gentleman with iron-gray hair; and a dignified lady who was doubtless
+the latter's wife. The lady was leading a child. Tom had been restless
+and full of chafings and repinings; conscience-smitten, too--he could
+not meet Amy Lawrence's eye, he could not brook her loving gaze. But
+when he saw this small new-comer his soul was all ablaze with bliss in
+a moment. The next moment he was "showing off" with all his might
+--cuffing boys, pulling hair, making faces--in a word, using every art
+that seemed likely to fascinate a girl and win her applause. His
+exaltation had but one alloy--the memory of his humiliation in this
+angel's garden--and that record in sand was fast washing out, under
+the waves of happiness that were sweeping over it now.
+
+The visitors were given the highest seat of honor, and as soon as Mr.
+Walters' speech was finished, he introduced them to the school. The
+middle-aged man turned out to be a prodigious personage--no less a one
+than the county judge--altogether the most august creation these
+children had ever looked upon--and they wondered what kind of material
+he was made of--and they half wanted to hear him roar, and were half
+afraid he might, too. He was from Constantinople, twelve miles away--so
+he had travelled, and seen the world--these very eyes had looked upon
+the county court-house--which was said to have a tin roof. The awe
+which these reflections inspired was attested by the impressive silence
+and the ranks of staring eyes. This was the great Judge Thatcher,
+brother of their own lawyer. Jeff Thatcher immediately went forward, to
+be familiar with the great man and be envied by the school. It would
+have been music to his soul to hear the whisperings:
+
+"Look at him, Jim! He's a going up there. Say--look! he's a going to
+shake hands with him--he IS shaking hands with him! By jings, don't you
+wish you was Jeff?"
+
+Mr. Walters fell to "showing off," with all sorts of official
+bustlings and activities, giving orders, delivering judgments,
+discharging directions here, there, everywhere that he could find a
+target. The librarian "showed off"--running hither and thither with his
+arms full of books and making a deal of the splutter and fuss that
+insect authority delights in. The young lady teachers "showed off"
+--bending sweetly over pupils that were lately being boxed, lifting
+pretty warning fingers at bad little boys and patting good ones
+lovingly. The young gentlemen teachers "showed off" with small
+scoldings and other little displays of authority and fine attention to
+discipline--and most of the teachers, of both sexes, found business up
+at the library, by the pulpit; and it was business that frequently had
+to be done over again two or three times (with much seeming vexation).
+The little girls "showed off" in various ways, and the little boys
+"showed off" with such diligence that the air was thick with paper wads
+and the murmur of scufflings. And above it all the great man sat and
+beamed a majestic judicial smile upon all the house, and warmed himself
+in the sun of his own grandeur--for he was "showing off," too.
+
+There was only one thing wanting to make Mr. Walters' ecstasy
+complete, and that was a chance to deliver a Bible-prize and exhibit a
+prodigy. Several pupils had a few yellow tickets, but none had enough
+--he had been around among the star pupils inquiring. He would have given
+worlds, now, to have that German lad back again with a sound mind.
+
+And now at this moment, when hope was dead, Tom Sawyer came forward
+with nine yellow tickets, nine red tickets, and ten blue ones, and
+demanded a Bible. This was a thunderbolt out of a clear sky. Walters
+was not expecting an application from this source for the next ten
+years. But there was no getting around it--here were the certified
+checks, and they were good for their face. Tom was therefore elevated
+to a place with the Judge and the other elect, and the great news was
+announced from headquarters. It was the most stunning surprise of the
+decade, and so profound was the sensation that it lifted the new hero
+up to the judicial one's altitude, and the school had two marvels to
+gaze upon in place of one. The boys were all eaten up with envy--but
+those that suffered the bitterest pangs were those who perceived too
+late that they themselves had contributed to this hated splendor by
+trading tickets to Tom for the wealth he had amassed in selling
+whitewashing privileges. These despised themselves, as being the dupes
+of a wily fraud, a guileful snake in the grass.
+
+The prize was delivered to Tom with as much effusion as the
+superintendent could pump up under the circumstances; but it lacked
+somewhat of the true gush, for the poor fellow's instinct taught him
+that there was a mystery here that could not well bear the light,
+perhaps; it was simply preposterous that this boy had warehoused two
+thousand sheaves of Scriptural wisdom on his premises--a dozen would
+strain his capacity, without a doubt.
+
+Amy Lawrence was proud and glad, and she tried to make Tom see it in
+her face--but he wouldn't look. She wondered; then she was just a grain
+troubled; next a dim suspicion came and went--came again; she watched;
+a furtive glance told her worlds--and then her heart broke, and she was
+jealous, and angry, and the tears came and she hated everybody. Tom
+most of all (she thought).
+
+Tom was introduced to the Judge; but his tongue was tied, his breath
+would hardly come, his heart quaked--partly because of the awful
+greatness of the man, but mainly because he was her parent. He would
+have liked to fall down and worship him, if it were in the dark. The
+Judge put his hand on Tom's head and called him a fine little man, and
+asked him what his name was. The boy stammered, gasped, and got it out:
+
+"Tom."
+
+"Oh, no, not Tom--it is--"
+
+"Thomas."
+
+"Ah, that's it. I thought there was more to it, maybe. That's very
+well. But you've another one I daresay, and you'll tell it to me, won't
+you?"
+
+"Tell the gentleman your other name, Thomas," said Walters, "and say
+sir. You mustn't forget your manners."
+
+"Thomas Sawyer--sir."
+
+"That's it! That's a good boy. Fine boy. Fine, manly little fellow.
+Two thousand verses is a great many--very, very great many. And you
+never can be sorry for the trouble you took to learn them; for
+knowledge is worth more than anything there is in the world; it's what
+makes great men and good men; you'll be a great man and a good man
+yourself, some day, Thomas, and then you'll look back and say, It's all
+owing to the precious Sunday-school privileges of my boyhood--it's all
+owing to my dear teachers that taught me to learn--it's all owing to
+the good superintendent, who encouraged me, and watched over me, and
+gave me a beautiful Bible--a splendid elegant Bible--to keep and have
+it all for my own, always--it's all owing to right bringing up! That is
+what you will say, Thomas--and you wouldn't take any money for those
+two thousand verses--no indeed you wouldn't. And now you wouldn't mind
+telling me and this lady some of the things you've learned--no, I know
+you wouldn't--for we are proud of little boys that learn. Now, no
+doubt you know the names of all the twelve disciples. Won't you tell us
+the names of the first two that were appointed?"
+
+Tom was tugging at a button-hole and looking sheepish. He blushed,
+now, and his eyes fell. Mr. Walters' heart sank within him. He said to
+himself, it is not possible that the boy can answer the simplest
+question--why DID the Judge ask him? Yet he felt obliged to speak up
+and say:
+
+"Answer the gentleman, Thomas--don't be afraid."
+
+Tom still hung fire.
+
+"Now I know you'll tell me," said the lady. "The names of the first
+two disciples were--"
+
+"DAVID AND GOLIAH!"
+
+Let us draw the curtain of charity over the rest of the scene.
+
+
+
+CHAPTER V
+
+ABOUT half-past ten the cracked bell of the small church began to
+ring, and presently the people began to gather for the morning sermon.
+The Sunday-school children distributed themselves about the house and
+occupied pews with their parents, so as to be under supervision. Aunt
+Polly came, and Tom and Sid and Mary sat with her--Tom being placed
+next the aisle, in order that he might be as far away from the open
+window and the seductive outside summer scenes as possible. The crowd
+filed up the aisles: the aged and needy postmaster, who had seen better
+days; the mayor and his wife--for they had a mayor there, among other
+unnecessaries; the justice of the peace; the widow Douglass, fair,
+smart, and forty, a generous, good-hearted soul and well-to-do, her
+hill mansion the only palace in the town, and the most hospitable and
+much the most lavish in the matter of festivities that St. Petersburg
+could boast; the bent and venerable Major and Mrs. Ward; lawyer
+Riverson, the new notable from a distance; next the belle of the
+village, followed by a troop of lawn-clad and ribbon-decked young
+heart-breakers; then all the young clerks in town in a body--for they
+had stood in the vestibule sucking their cane-heads, a circling wall of
+oiled and simpering admirers, till the last girl had run their gantlet;
+and last of all came the Model Boy, Willie Mufferson, taking as heedful
+care of his mother as if she were cut glass. He always brought his
+mother to church, and was the pride of all the matrons. The boys all
+hated him, he was so good. And besides, he had been "thrown up to them"
+so much. His white handkerchief was hanging out of his pocket behind, as
+usual on Sundays--accidentally. Tom had no handkerchief, and he looked
+upon boys who had as snobs.
+
+The congregation being fully assembled, now, the bell rang once more,
+to warn laggards and stragglers, and then a solemn hush fell upon the
+church which was only broken by the tittering and whispering of the
+choir in the gallery. The choir always tittered and whispered all
+through service. There was once a church choir that was not ill-bred,
+but I have forgotten where it was, now. It was a great many years ago,
+and I can scarcely remember anything about it, but I think it was in
+some foreign country.
+
+The minister gave out the hymn, and read it through with a relish, in
+a peculiar style which was much admired in that part of the country.
+His voice began on a medium key and climbed steadily up till it reached
+a certain point, where it bore with strong emphasis upon the topmost
+word and then plunged down as if from a spring-board:
+
+  Shall I be car-ri-ed toe the skies, on flow'ry BEDS of ease,
+
+  Whilst others fight to win the prize, and sail thro' BLOODY seas?
+
+He was regarded as a wonderful reader. At church "sociables" he was
+always called upon to read poetry; and when he was through, the ladies
+would lift up their hands and let them fall helplessly in their laps,
+and "wall" their eyes, and shake their heads, as much as to say, "Words
+cannot express it; it is too beautiful, TOO beautiful for this mortal
+earth."
+
+After the hymn had been sung, the Rev. Mr. Sprague turned himself into
+a bulletin-board, and read off "notices" of meetings and societies and
+things till it seemed that the list would stretch out to the crack of
+doom--a queer custom which is still kept up in America, even in cities,
+away here in this age of abundant newspapers. Often, the less there is
+to justify a traditional custom, the harder it is to get rid of it.
+
+And now the minister prayed. A good, generous prayer it was, and went
+into details: it pleaded for the church, and the little children of the
+church; for the other churches of the village; for the village itself;
+for the county; for the State; for the State officers; for the United
+States; for the churches of the United States; for Congress; for the
+President; for the officers of the Government; for poor sailors, tossed
+by stormy seas; for the oppressed millions groaning under the heel of
+European monarchies and Oriental despotisms; for such as have the light
+and the good tidings, and yet have not eyes to see nor ears to hear
+withal; for the heathen in the far islands of the sea; and closed with
+a supplication that the words he was about to speak might find grace
+and favor, and be as seed sown in fertile ground, yielding in time a
+grateful harvest of good. Amen.
+
+There was a rustling of dresses, and the standing congregation sat
+down. The boy whose history this book relates did not enjoy the prayer,
+he only endured it--if he even did that much. He was restive all
+through it; he kept tally of the details of the prayer, unconsciously
+--for he was not listening, but he knew the ground of old, and the
+clergyman's regular route over it--and when a little trifle of new
+matter was interlarded, his ear detected it and his whole nature
+resented it; he considered additions unfair, and scoundrelly. In the
+midst of the prayer a fly had lit on the back of the pew in front of
+him and tortured his spirit by calmly rubbing its hands together,
+embracing its head with its arms, and polishing it so vigorously that
+it seemed to almost part company with the body, and the slender thread
+of a neck was exposed to view; scraping its wings with its hind legs
+and smoothing them to its body as if they had been coat-tails; going
+through its whole toilet as tranquilly as if it knew it was perfectly
+safe. As indeed it was; for as sorely as Tom's hands itched to grab for
+it they did not dare--he believed his soul would be instantly destroyed
+if he did such a thing while the prayer was going on. But with the
+closing sentence his hand began to curve and steal forward; and the
+instant the "Amen" was out the fly was a prisoner of war. His aunt
+detected the act and made him let it go.
+
+The minister gave out his text and droned along monotonously through
+an argument that was so prosy that many a head by and by began to nod
+--and yet it was an argument that dealt in limitless fire and brimstone
+and thinned the predestined elect down to a company so small as to be
+hardly worth the saving. Tom counted the pages of the sermon; after
+church he always knew how many pages there had been, but he seldom knew
+anything else about the discourse. However, this time he was really
+interested for a little while. The minister made a grand and moving
+picture of the assembling together of the world's hosts at the
+millennium when the lion and the lamb should lie down together and a
+little child should lead them. But the pathos, the lesson, the moral of
+the great spectacle were lost upon the boy; he only thought of the
+conspicuousness of the principal character before the on-looking
+nations; his face lit with the thought, and he said to himself that he
+wished he could be that child, if it was a tame lion.
+
+Now he lapsed into suffering again, as the dry argument was resumed.
+Presently he bethought him of a treasure he had and got it out. It was
+a large black beetle with formidable jaws--a "pinchbug," he called it.
+It was in a percussion-cap box. The first thing the beetle did was to
+take him by the finger. A natural fillip followed, the beetle went
+floundering into the aisle and lit on its back, and the hurt finger
+went into the boy's mouth. The beetle lay there working its helpless
+legs, unable to turn over. Tom eyed it, and longed for it; but it was
+safe out of his reach. Other people uninterested in the sermon found
+relief in the beetle, and they eyed it too. Presently a vagrant poodle
+dog came idling along, sad at heart, lazy with the summer softness and
+the quiet, weary of captivity, sighing for change. He spied the beetle;
+the drooping tail lifted and wagged. He surveyed the prize; walked
+around it; smelt at it from a safe distance; walked around it again;
+grew bolder, and took a closer smell; then lifted his lip and made a
+gingerly snatch at it, just missing it; made another, and another;
+began to enjoy the diversion; subsided to his stomach with the beetle
+between his paws, and continued his experiments; grew weary at last,
+and then indifferent and absent-minded. His head nodded, and little by
+little his chin descended and touched the enemy, who seized it. There
+was a sharp yelp, a flirt of the poodle's head, and the beetle fell a
+couple of yards away, and lit on its back once more. The neighboring
+spectators shook with a gentle inward joy, several faces went behind
+fans and handkerchiefs, and Tom was entirely happy. The dog looked
+foolish, and probably felt so; but there was resentment in his heart,
+too, and a craving for revenge. So he went to the beetle and began a
+wary attack on it again; jumping at it from every point of a circle,
+lighting with his fore-paws within an inch of the creature, making even
+closer snatches at it with his teeth, and jerking his head till his
+ears flapped again. But he grew tired once more, after a while; tried
+to amuse himself with a fly but found no relief; followed an ant
+around, with his nose close to the floor, and quickly wearied of that;
+yawned, sighed, forgot the beetle entirely, and sat down on it. Then
+there was a wild yelp of agony and the poodle went sailing up the
+aisle; the yelps continued, and so did the dog; he crossed the house in
+front of the altar; he flew down the other aisle; he crossed before the
+doors; he clamored up the home-stretch; his anguish grew with his
+progress, till presently he was but a woolly comet moving in its orbit
+with the gleam and the speed of light. At last the frantic sufferer
+sheered from its course, and sprang into its master's lap; he flung it
+out of the window, and the voice of distress quickly thinned away and
+died in the distance.
+
+By this time the whole church was red-faced and suffocating with
+suppressed laughter, and the sermon had come to a dead standstill. The
+discourse was resumed presently, but it went lame and halting, all
+possibility of impressiveness being at an end; for even the gravest
+sentiments were constantly being received with a smothered burst of
+unholy mirth, under cover of some remote pew-back, as if the poor
+parson had said a rarely facetious thing. It was a genuine relief to
+the whole congregation when the ordeal was over and the benediction
+pronounced.
+
+Tom Sawyer went home quite cheerful, thinking to himself that there
+was some satisfaction about divine service when there was a bit of
+variety in it. He had but one marring thought; he was willing that the
+dog should play with his pinchbug, but he did not think it was upright
+in him to carry it off.
+
+
+
+CHAPTER VI
+
+MONDAY morning found Tom Sawyer miserable. Monday morning always found
+him so--because it began another week's slow suffering in school. He
+generally began that day with wishing he had had no intervening
+holiday, it made the going into captivity and fetters again so much
+more odious.
+
+Tom lay thinking. Presently it occurred to him that he wished he was
+sick; then he could stay home from school. Here was a vague
+possibility. He canvassed his system. No ailment was found, and he
+investigated again. This time he thought he could detect colicky
+symptoms, and he began to encourage them with considerable hope. But
+they soon grew feeble, and presently died wholly away. He reflected
+further. Suddenly he discovered something. One of his upper front teeth
+was loose. This was lucky; he was about to begin to groan, as a
+"starter," as he called it, when it occurred to him that if he came
+into court with that argument, his aunt would pull it out, and that
+would hurt. So he thought he would hold the tooth in reserve for the
+present, and seek further. Nothing offered for some little time, and
+then he remembered hearing the doctor tell about a certain thing that
+laid up a patient for two or three weeks and threatened to make him
+lose a finger. So the boy eagerly drew his sore toe from under the
+sheet and held it up for inspection. But now he did not know the
+necessary symptoms. However, it seemed well worth while to chance it,
+so he fell to groaning with considerable spirit.
+
+But Sid slept on unconscious.
+
+Tom groaned louder, and fancied that he began to feel pain in the toe.
+
+No result from Sid.
+
+Tom was panting with his exertions by this time. He took a rest and
+then swelled himself up and fetched a succession of admirable groans.
+
+Sid snored on.
+
+Tom was aggravated. He said, "Sid, Sid!" and shook him. This course
+worked well, and Tom began to groan again. Sid yawned, stretched, then
+brought himself up on his elbow with a snort, and began to stare at
+Tom. Tom went on groaning. Sid said:
+
+"Tom! Say, Tom!" [No response.] "Here, Tom! TOM! What is the matter,
+Tom?" And he shook him and looked in his face anxiously.
+
+Tom moaned out:
+
+"Oh, don't, Sid. Don't joggle me."
+
+"Why, what's the matter, Tom? I must call auntie."
+
+"No--never mind. It'll be over by and by, maybe. Don't call anybody."
+
+"But I must! DON'T groan so, Tom, it's awful. How long you been this
+way?"
+
+"Hours. Ouch! Oh, don't stir so, Sid, you'll kill me."
+
+"Tom, why didn't you wake me sooner? Oh, Tom, DON'T! It makes my
+flesh crawl to hear you. Tom, what is the matter?"
+
+"I forgive you everything, Sid. [Groan.] Everything you've ever done
+to me. When I'm gone--"
+
+"Oh, Tom, you ain't dying, are you? Don't, Tom--oh, don't. Maybe--"
+
+"I forgive everybody, Sid. [Groan.] Tell 'em so, Sid. And Sid, you
+give my window-sash and my cat with one eye to that new girl that's
+come to town, and tell her--"
+
+But Sid had snatched his clothes and gone. Tom was suffering in
+reality, now, so handsomely was his imagination working, and so his
+groans had gathered quite a genuine tone.
+
+Sid flew down-stairs and said:
+
+"Oh, Aunt Polly, come! Tom's dying!"
+
+"Dying!"
+
+"Yes'm. Don't wait--come quick!"
+
+"Rubbage! I don't believe it!"
+
+But she fled up-stairs, nevertheless, with Sid and Mary at her heels.
+And her face grew white, too, and her lip trembled. When she reached
+the bedside she gasped out:
+
+"You, Tom! Tom, what's the matter with you?"
+
+"Oh, auntie, I'm--"
+
+"What's the matter with you--what is the matter with you, child?"
+
+"Oh, auntie, my sore toe's mortified!"
+
+The old lady sank down into a chair and laughed a little, then cried a
+little, then did both together. This restored her and she said:
+
+"Tom, what a turn you did give me. Now you shut up that nonsense and
+climb out of this."
+
+The groans ceased and the pain vanished from the toe. The boy felt a
+little foolish, and he said:
+
+"Aunt Polly, it SEEMED mortified, and it hurt so I never minded my
+tooth at all."
+
+"Your tooth, indeed! What's the matter with your tooth?"
+
+"One of them's loose, and it aches perfectly awful."
+
+"There, there, now, don't begin that groaning again. Open your mouth.
+Well--your tooth IS loose, but you're not going to die about that.
+Mary, get me a silk thread, and a chunk of fire out of the kitchen."
+
+Tom said:
+
+"Oh, please, auntie, don't pull it out. It don't hurt any more. I wish
+I may never stir if it does. Please don't, auntie. I don't want to stay
+home from school."
+
+"Oh, you don't, don't you? So all this row was because you thought
+you'd get to stay home from school and go a-fishing? Tom, Tom, I love
+you so, and you seem to try every way you can to break my old heart
+with your outrageousness." By this time the dental instruments were
+ready. The old lady made one end of the silk thread fast to Tom's tooth
+with a loop and tied the other to the bedpost. Then she seized the
+chunk of fire and suddenly thrust it almost into the boy's face. The
+tooth hung dangling by the bedpost, now.
+
+But all trials bring their compensations. As Tom wended to school
+after breakfast, he was the envy of every boy he met because the gap in
+his upper row of teeth enabled him to expectorate in a new and
+admirable way. He gathered quite a following of lads interested in the
+exhibition; and one that had cut his finger and had been a centre of
+fascination and homage up to this time, now found himself suddenly
+without an adherent, and shorn of his glory. His heart was heavy, and
+he said with a disdain which he did not feel that it wasn't anything to
+spit like Tom Sawyer; but another boy said, "Sour grapes!" and he
+wandered away a dismantled hero.
+
+Shortly Tom came upon the juvenile pariah of the village, Huckleberry
+Finn, son of the town drunkard. Huckleberry was cordially hated and
+dreaded by all the mothers of the town, because he was idle and lawless
+and vulgar and bad--and because all their children admired him so, and
+delighted in his forbidden society, and wished they dared to be like
+him. Tom was like the rest of the respectable boys, in that he envied
+Huckleberry his gaudy outcast condition, and was under strict orders
+not to play with him. So he played with him every time he got a chance.
+Huckleberry was always dressed in the cast-off clothes of full-grown
+men, and they were in perennial bloom and fluttering with rags. His hat
+was a vast ruin with a wide crescent lopped out of its brim; his coat,
+when he wore one, hung nearly to his heels and had the rearward buttons
+far down the back; but one suspender supported his trousers; the seat
+of the trousers bagged low and contained nothing, the fringed legs
+dragged in the dirt when not rolled up.
+
+Huckleberry came and went, at his own free will. He slept on doorsteps
+in fine weather and in empty hogsheads in wet; he did not have to go to
+school or to church, or call any being master or obey anybody; he could
+go fishing or swimming when and where he chose, and stay as long as it
+suited him; nobody forbade him to fight; he could sit up as late as he
+pleased; he was always the first boy that went barefoot in the spring
+and the last to resume leather in the fall; he never had to wash, nor
+put on clean clothes; he could swear wonderfully. In a word, everything
+that goes to make life precious that boy had. So thought every
+harassed, hampered, respectable boy in St. Petersburg.
+
+Tom hailed the romantic outcast:
+
+"Hello, Huckleberry!"
+
+"Hello yourself, and see how you like it."
+
+"What's that you got?"
+
+"Dead cat."
+
+"Lemme see him, Huck. My, he's pretty stiff. Where'd you get him?"
+
+"Bought him off'n a boy."
+
+"What did you give?"
+
+"I give a blue ticket and a bladder that I got at the slaughter-house."
+
+"Where'd you get the blue ticket?"
+
+"Bought it off'n Ben Rogers two weeks ago for a hoop-stick."
+
+"Say--what is dead cats good for, Huck?"
+
+"Good for? Cure warts with."
+
+"No! Is that so? I know something that's better."
+
+"I bet you don't. What is it?"
+
+"Why, spunk-water."
+
+"Spunk-water! I wouldn't give a dern for spunk-water."
+
+"You wouldn't, wouldn't you? D'you ever try it?"
+
+"No, I hain't. But Bob Tanner did."
+
+"Who told you so!"
+
+"Why, he told Jeff Thatcher, and Jeff told Johnny Baker, and Johnny
+told Jim Hollis, and Jim told Ben Rogers, and Ben told a nigger, and
+the nigger told me. There now!"
+
+"Well, what of it? They'll all lie. Leastways all but the nigger. I
+don't know HIM. But I never see a nigger that WOULDN'T lie. Shucks! Now
+you tell me how Bob Tanner done it, Huck."
+
+"Why, he took and dipped his hand in a rotten stump where the
+rain-water was."
+
+"In the daytime?"
+
+"Certainly."
+
+"With his face to the stump?"
+
+"Yes. Least I reckon so."
+
+"Did he say anything?"
+
+"I don't reckon he did. I don't know."
+
+"Aha! Talk about trying to cure warts with spunk-water such a blame
+fool way as that! Why, that ain't a-going to do any good. You got to go
+all by yourself, to the middle of the woods, where you know there's a
+spunk-water stump, and just as it's midnight you back up against the
+stump and jam your hand in and say:
+
+  'Barley-corn, barley-corn, injun-meal shorts,
+   Spunk-water, spunk-water, swaller these warts,'
+
+and then walk away quick, eleven steps, with your eyes shut, and then
+turn around three times and walk home without speaking to anybody.
+Because if you speak the charm's busted."
+
+"Well, that sounds like a good way; but that ain't the way Bob Tanner
+done."
+
+"No, sir, you can bet he didn't, becuz he's the wartiest boy in this
+town; and he wouldn't have a wart on him if he'd knowed how to work
+spunk-water. I've took off thousands of warts off of my hands that way,
+Huck. I play with frogs so much that I've always got considerable many
+warts. Sometimes I take 'em off with a bean."
+
+"Yes, bean's good. I've done that."
+
+"Have you? What's your way?"
+
+"You take and split the bean, and cut the wart so as to get some
+blood, and then you put the blood on one piece of the bean and take and
+dig a hole and bury it 'bout midnight at the crossroads in the dark of
+the moon, and then you burn up the rest of the bean. You see that piece
+that's got the blood on it will keep drawing and drawing, trying to
+fetch the other piece to it, and so that helps the blood to draw the
+wart, and pretty soon off she comes."
+
+"Yes, that's it, Huck--that's it; though when you're burying it if you
+say 'Down bean; off wart; come no more to bother me!' it's better.
+That's the way Joe Harper does, and he's been nearly to Coonville and
+most everywheres. But say--how do you cure 'em with dead cats?"
+
+"Why, you take your cat and go and get in the graveyard 'long about
+midnight when somebody that was wicked has been buried; and when it's
+midnight a devil will come, or maybe two or three, but you can't see
+'em, you can only hear something like the wind, or maybe hear 'em talk;
+and when they're taking that feller away, you heave your cat after 'em
+and say, 'Devil follow corpse, cat follow devil, warts follow cat, I'm
+done with ye!' That'll fetch ANY wart."
+
+"Sounds right. D'you ever try it, Huck?"
+
+"No, but old Mother Hopkins told me."
+
+"Well, I reckon it's so, then. Becuz they say she's a witch."
+
+"Say! Why, Tom, I KNOW she is. She witched pap. Pap says so his own
+self. He come along one day, and he see she was a-witching him, so he
+took up a rock, and if she hadn't dodged, he'd a got her. Well, that
+very night he rolled off'n a shed wher' he was a layin drunk, and broke
+his arm."
+
+"Why, that's awful. How did he know she was a-witching him?"
+
+"Lord, pap can tell, easy. Pap says when they keep looking at you
+right stiddy, they're a-witching you. Specially if they mumble. Becuz
+when they mumble they're saying the Lord's Prayer backards."
+
+"Say, Hucky, when you going to try the cat?"
+
+"To-night. I reckon they'll come after old Hoss Williams to-night."
+
+"But they buried him Saturday. Didn't they get him Saturday night?"
+
+"Why, how you talk! How could their charms work till midnight?--and
+THEN it's Sunday. Devils don't slosh around much of a Sunday, I don't
+reckon."
+
+"I never thought of that. That's so. Lemme go with you?"
+
+"Of course--if you ain't afeard."
+
+"Afeard! 'Tain't likely. Will you meow?"
+
+"Yes--and you meow back, if you get a chance. Last time, you kep' me
+a-meowing around till old Hays went to throwing rocks at me and says
+'Dern that cat!' and so I hove a brick through his window--but don't
+you tell."
+
+"I won't. I couldn't meow that night, becuz auntie was watching me,
+but I'll meow this time. Say--what's that?"
+
+"Nothing but a tick."
+
+"Where'd you get him?"
+
+"Out in the woods."
+
+"What'll you take for him?"
+
+"I don't know. I don't want to sell him."
+
+"All right. It's a mighty small tick, anyway."
+
+"Oh, anybody can run a tick down that don't belong to them. I'm
+satisfied with it. It's a good enough tick for me."
+
+"Sho, there's ticks a plenty. I could have a thousand of 'em if I
+wanted to."
+
+"Well, why don't you? Becuz you know mighty well you can't. This is a
+pretty early tick, I reckon. It's the first one I've seen this year."
+
+"Say, Huck--I'll give you my tooth for him."
+
+"Less see it."
+
+Tom got out a bit of paper and carefully unrolled it. Huckleberry
+viewed it wistfully. The temptation was very strong. At last he said:
+
+"Is it genuwyne?"
+
+Tom lifted his lip and showed the vacancy.
+
+"Well, all right," said Huckleberry, "it's a trade."
+
+Tom enclosed the tick in the percussion-cap box that had lately been
+the pinchbug's prison, and the boys separated, each feeling wealthier
+than before.
+
+When Tom reached the little isolated frame schoolhouse, he strode in
+briskly, with the manner of one who had come with all honest speed.
+He hung his hat on a peg and flung himself into his seat with
+business-like alacrity. The master, throned on high in his great
+splint-bottom arm-chair, was dozing, lulled by the drowsy hum of study.
+The interruption roused him.
+
+"Thomas Sawyer!"
+
+Tom knew that when his name was pronounced in full, it meant trouble.
+
+"Sir!"
+
+"Come up here. Now, sir, why are you late again, as usual?"
+
+Tom was about to take refuge in a lie, when he saw two long tails of
+yellow hair hanging down a back that he recognized by the electric
+sympathy of love; and by that form was THE ONLY VACANT PLACE on the
+girls' side of the schoolhouse. He instantly said:
+
+"I STOPPED TO TALK WITH HUCKLEBERRY FINN!"
+
+The master's pulse stood still, and he stared helplessly. The buzz of
+study ceased. The pupils wondered if this foolhardy boy had lost his
+mind. The master said:
+
+"You--you did what?"
+
+"Stopped to talk with Huckleberry Finn."
+
+There was no mistaking the words.
+
+"Thomas Sawyer, this is the most astounding confession I have ever
+listened to. No mere ferule will answer for this offence. Take off your
+jacket."
+
+The master's arm performed until it was tired and the stock of
+switches notably diminished. Then the order followed:
+
+"Now, sir, go and sit with the girls! And let this be a warning to you."
+
+The titter that rippled around the room appeared to abash the boy, but
+in reality that result was caused rather more by his worshipful awe of
+his unknown idol and the dread pleasure that lay in his high good
+fortune. He sat down upon the end of the pine bench and the girl
+hitched herself away from him with a toss of her head. Nudges and winks
+and whispers traversed the room, but Tom sat still, with his arms upon
+the long, low desk before him, and seemed to study his book.
+
+By and by attention ceased from him, and the accustomed school murmur
+rose upon the dull air once more. Presently the boy began to steal
+furtive glances at the girl. She observed it, "made a mouth" at him and
+gave him the back of her head for the space of a minute. When she
+cautiously faced around again, a peach lay before her. She thrust it
+away. Tom gently put it back. She thrust it away again, but with less
+animosity. Tom patiently returned it to its place. Then she let it
+remain. Tom scrawled on his slate, "Please take it--I got more." The
+girl glanced at the words, but made no sign. Now the boy began to draw
+something on the slate, hiding his work with his left hand. For a time
+the girl refused to notice; but her human curiosity presently began to
+manifest itself by hardly perceptible signs. The boy worked on,
+apparently unconscious. The girl made a sort of noncommittal attempt to
+see, but the boy did not betray that he was aware of it. At last she
+gave in and hesitatingly whispered:
+
+"Let me see it."
+
+Tom partly uncovered a dismal caricature of a house with two gable
+ends to it and a corkscrew of smoke issuing from the chimney. Then the
+girl's interest began to fasten itself upon the work and she forgot
+everything else. When it was finished, she gazed a moment, then
+whispered:
+
+"It's nice--make a man."
+
+The artist erected a man in the front yard, that resembled a derrick.
+He could have stepped over the house; but the girl was not
+hypercritical; she was satisfied with the monster, and whispered:
+
+"It's a beautiful man--now make me coming along."
+
+Tom drew an hour-glass with a full moon and straw limbs to it and
+armed the spreading fingers with a portentous fan. The girl said:
+
+"It's ever so nice--I wish I could draw."
+
+"It's easy," whispered Tom, "I'll learn you."
+
+"Oh, will you? When?"
+
+"At noon. Do you go home to dinner?"
+
+"I'll stay if you will."
+
+"Good--that's a whack. What's your name?"
+
+"Becky Thatcher. What's yours? Oh, I know. It's Thomas Sawyer."
+
+"That's the name they lick me by. I'm Tom when I'm good. You call me
+Tom, will you?"
+
+"Yes."
+
+Now Tom began to scrawl something on the slate, hiding the words from
+the girl. But she was not backward this time. She begged to see. Tom
+said:
+
+"Oh, it ain't anything."
+
+"Yes it is."
+
+"No it ain't. You don't want to see."
+
+"Yes I do, indeed I do. Please let me."
+
+"You'll tell."
+
+"No I won't--deed and deed and double deed won't."
+
+"You won't tell anybody at all? Ever, as long as you live?"
+
+"No, I won't ever tell ANYbody. Now let me."
+
+"Oh, YOU don't want to see!"
+
+"Now that you treat me so, I WILL see." And she put her small hand
+upon his and a little scuffle ensued, Tom pretending to resist in
+earnest but letting his hand slip by degrees till these words were
+revealed: "I LOVE YOU."
+
+"Oh, you bad thing!" And she hit his hand a smart rap, but reddened
+and looked pleased, nevertheless.
+
+Just at this juncture the boy felt a slow, fateful grip closing on his
+ear, and a steady lifting impulse. In that wise he was borne across the
+house and deposited in his own seat, under a peppering fire of giggles
+from the whole school. Then the master stood over him during a few
+awful moments, and finally moved away to his throne without saying a
+word. But although Tom's ear tingled, his heart was jubilant.
+
+As the school quieted down Tom made an honest effort to study, but the
+turmoil within him was too great. In turn he took his place in the
+reading class and made a botch of it; then in the geography class and
+turned lakes into mountains, mountains into rivers, and rivers into
+continents, till chaos was come again; then in the spelling class, and
+got "turned down," by a succession of mere baby words, till he brought
+up at the foot and yielded up the pewter medal which he had worn with
+ostentation for months.
+
+
+
+CHAPTER VII
+
+THE harder Tom tried to fasten his mind on his book, the more his
+ideas wandered. So at last, with a sigh and a yawn, he gave it up. It
+seemed to him that the noon recess would never come. The air was
+utterly dead. There was not a breath stirring. It was the sleepiest of
+sleepy days. The drowsing murmur of the five and twenty studying
+scholars soothed the soul like the spell that is in the murmur of bees.
+Away off in the flaming sunshine, Cardiff Hill lifted its soft green
+sides through a shimmering veil of heat, tinted with the 

<TRUNCATED>

[30/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopPlannerMockJob.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopPlannerMockJob.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopPlannerMockJob.java
new file mode 100644
index 0000000..88d0f80
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopPlannerMockJob.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteLogger;
+import org.jetbrains.annotations.Nullable;
+
+import java.util.Collection;
+import java.util.UUID;
+
+/**
+ * Mock job for planner tests.
+ */
+public class HadoopPlannerMockJob implements HadoopJob {
+    /** Input splits. */
+    private final Collection<HadoopInputSplit> splits;
+
+    /** Reducers count. */
+    private final int reducers;
+
+    /**
+     * Constructor.
+     *
+     * @param splits Input splits.
+     * @param reducers Reducers.
+     */
+    public HadoopPlannerMockJob(Collection<HadoopInputSplit> splits, int reducers) {
+        this.splits = splits;
+        this.reducers = reducers;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Collection<HadoopInputSplit> input() throws IgniteCheckedException {
+        return splits;
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopJobInfo info() {
+        return new JobInfo(reducers);
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopJobId id() {
+        throwUnsupported();
+
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopTaskContext getTaskContext(HadoopTaskInfo info) throws IgniteCheckedException {
+        throwUnsupported();
+
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void initialize(boolean external, UUID nodeId) throws IgniteCheckedException {
+        throwUnsupported();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void dispose(boolean external) throws IgniteCheckedException {
+        throwUnsupported();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void prepareTaskEnvironment(HadoopTaskInfo info) throws IgniteCheckedException {
+        throwUnsupported();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void cleanupTaskEnvironment(HadoopTaskInfo info) throws IgniteCheckedException {
+        throwUnsupported();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void cleanupStagingDirectory() {
+        throwUnsupported();
+    }
+
+    /**
+     * Throw {@link UnsupportedOperationException}.
+     */
+    private static void throwUnsupported() {
+        throw new UnsupportedOperationException("Should not be called!");
+    }
+
+    /**
+     * Mocked job info.
+     */
+    private static class JobInfo implements HadoopJobInfo {
+        /** Reducers. */
+        private final int reducers;
+
+        /**
+         * Constructor.
+         *
+         * @param reducers Reducers.
+         */
+        public JobInfo(int reducers) {
+            this.reducers = reducers;
+        }
+
+        /** {@inheritDoc} */
+        @Override public int reducers() {
+            return reducers;
+        }
+
+        /** {@inheritDoc} */
+        @Nullable @Override public String property(String name) {
+            throwUnsupported();
+
+            return null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean hasCombiner() {
+            throwUnsupported();
+
+            return false;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean hasReducer() {
+            throwUnsupported();
+
+            return false;
+        }
+
+        /** {@inheritDoc} */
+        @Override public HadoopJob createJob(Class<? extends HadoopJob> jobCls, HadoopJobId jobId, IgniteLogger log,
+            @Nullable String[] libNames) throws IgniteCheckedException {
+            throwUnsupported();
+
+            return null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public String jobName() {
+            throwUnsupported();
+
+            return null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public String user() {
+            throwUnsupported();
+
+            return null;
+        }
+    }
+}
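
A minimal sketch of how a planner test might drive this mock (illustrative only: the
split construction and the assertions below are assumptions, not part of this commit):

    Collection<HadoopInputSplit> splits = Collections.<HadoopInputSplit>singleton(
        new HadoopFileBlock(new String[] {"host1"}, URI.create("igfs://igfs@/file"), 0, 100));

    HadoopJob job = new HadoopPlannerMockJob(splits, 2);

    // Only input() and info() are backed by real data; everything else throws
    // UnsupportedOperationException by design.
    assert job.input().size() == 1;
    assert job.info().reducers() == 2;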

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopPopularWordsTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopPopularWordsTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopPopularWordsTest.java
new file mode 100644
index 0000000..3f825b0
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopPopularWordsTest.java
@@ -0,0 +1,298 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import com.google.common.collect.MinMaxPriorityQueue;
+import java.io.IOException;
+import java.util.Comparator;
+import java.util.Map.Entry;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+import static com.google.common.collect.Maps.immutableEntry;
+import static com.google.common.collect.MinMaxPriorityQueue.orderedBy;
+import static java.util.Collections.reverseOrder;
+
+/**
+ * Hadoop-based 10 popular words example: all files in a given directory are tokenized and for each word longer than
+ * 3 characters the number of occurrences is calculated. Finally, 10 words with the highest occurrence count are
+ * output.
+ *
+ * NOTE: in order to run this example on Windows please ensure that cygwin is installed and available in the system
+ * path.
+ */
+public class HadoopPopularWordsTest {
+    /** Ignite home. */
+    private static final String IGNITE_HOME = U.getIgniteHome();
+
+    /** The path to the input directory. All files in that directory will be processed. */
+    private static final Path BOOKS_LOCAL_DIR =
+        new Path("file:" + IGNITE_HOME, "modules/tests/java/org/apache/ignite/grid/hadoop/books");
+
+    /** The path to the output directory. The result file will be written to this location. */
+    private static final Path RESULT_LOCAL_DIR =
+        new Path("file:" + IGNITE_HOME, "modules/tests/java/org/apache/ignite/grid/hadoop/output");
+
+    /** Popular books source dir in DFS. */
+    private static final Path BOOKS_DFS_DIR = new Path("tmp/word-count-example/in");
+
+    /** Popular words result dir in DFS. */
+    private static final Path RESULT_DFS_DIR = new Path("tmp/word-count-example/out");
+
+    /** Path to the distributed file system configuration. */
+    private static final String DFS_CFG = "examples/config/filesystem/core-site.xml";
+
+    /** Top N words to select. */
+    private static final int POPULAR_WORDS_CNT = 10;
+
+    /**
+     * For each token in the input string the mapper emits a {word, 1} pair.
+     */
+    private static class TokenizingMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
+        /** Constant value. */
+        private static final IntWritable ONE = new IntWritable(1);
+
+        /** The word converted into the Text. */
+        private Text word = new Text();
+
+        /**
+         * Emits an entry where the key is the word and the value is always 1.
+         *
+         * @param key the current position in the input file (not used here)
+         * @param val the text string
+         * @param ctx mapper context
+         * @throws IOException If failed.
+         * @throws InterruptedException If failed.
+         */
+        @Override protected void map(LongWritable key, Text val, Context ctx)
+            throws IOException, InterruptedException {
+            // Get the mapped object.
+            final String line = val.toString();
+
+            // Splits the given string to words.
+            final String[] words = line.split("[^a-zA-Z0-9]");
+
+            for (final String w : words) {
+                // Only emit counts for longer words.
+                if (w.length() <= 3)
+                    continue;
+
+                word.set(w);
+
+                // Write the word into the context with an initial count of 1.
+                ctx.write(word, ONE);
+            }
+        }
+    }
+
+    /**
+     * The reducer uses a priority queue to rank the words based on their number of occurrences.
+     */
+    private static class TopNWordsReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
+        /** Queue that keeps the top N words ranked by occurrence count. */
+        private MinMaxPriorityQueue<Entry<Integer, String>> q;
+
+        /** Builds a bounded queue ordered by descending occurrence count. */
+        TopNWordsReducer() {
+            q = orderedBy(reverseOrder(new Comparator<Entry<Integer, String>>() {
+                @Override public int compare(Entry<Integer, String> o1, Entry<Integer, String> o2) {
+                    return o1.getKey().compareTo(o2.getKey());
+                }
+            })).expectedSize(POPULAR_WORDS_CNT).maximumSize(POPULAR_WORDS_CNT).create();
+        }
+
+        /**
+         * This method doesn't emit anything, but just keeps track of the top N words.
+         *
+         * @param key The word.
+         * @param vals The words counts.
+         * @param ctx Reducer context.
+         * @throws IOException If failed.
+         * @throws InterruptedException If failed.
+         */
+        @Override public void reduce(Text key, Iterable<IntWritable> vals, Context ctx) throws IOException,
+            InterruptedException {
+            int sum = 0;
+
+            for (IntWritable val : vals)
+                sum += val.get();
+
+            q.add(immutableEntry(sum, key.toString()));
+        }
+
+        /**
+         * This method is called after all the word entries have been processed. It writes the accumulated
+         * statistics to the job output file.
+         *
+         * @param ctx The job context.
+         * @throws IOException If failed.
+         * @throws InterruptedException If failed.
+         */
+        @Override protected void cleanup(Context ctx) throws IOException, InterruptedException {
+            IntWritable i = new IntWritable();
+
+            Text txt = new Text();
+
+            // Iterate in descending order of occurrence count.
+            while (!q.isEmpty()) {
+                Entry<Integer, String> e = q.removeFirst();
+
+                i.set(e.getKey());
+
+                txt.set(e.getValue());
+
+                ctx.write(txt, i);
+            }
+        }
+    }
+
+    /**
+     * Configures the Hadoop MapReduce job.
+     *
+     * @return Instance of the Hadoop MapReduce job.
+     * @throws IOException If failed.
+     */
+    @SuppressWarnings("deprecation")
+    private Job createConfigBasedHadoopJob() throws IOException {
+        Job jobCfg = new Job();
+
+        Configuration cfg = jobCfg.getConfiguration();
+
+        // Use explicit configuration of distributed file system, if provided.
+        cfg.addResource(U.resolveIgniteUrl(DFS_CFG));
+
+        jobCfg.setJobName("HadoopPopularWordExample");
+        jobCfg.setJarByClass(HadoopPopularWordsTest.class);
+        jobCfg.setInputFormatClass(TextInputFormat.class);
+        jobCfg.setOutputKeyClass(Text.class);
+        jobCfg.setOutputValueClass(IntWritable.class);
+        jobCfg.setMapperClass(TokenizingMapper.class);
+        jobCfg.setReducerClass(TopNWordsReducer.class);
+
+        FileInputFormat.setInputPaths(jobCfg, BOOKS_DFS_DIR);
+        FileOutputFormat.setOutputPath(jobCfg, RESULT_DFS_DIR);
+
+        // Local job tracker allows only one task per wave, but the text input format
+        // overrides that with a value calculated from the input split size options.
+        if ("local".equals(cfg.get("mapred.job.tracker", "local"))) {
+            // Split job into tasks using 32MB split size.
+            FileInputFormat.setMinInputSplitSize(jobCfg, 32 * 1024 * 1024);
+            FileInputFormat.setMaxInputSplitSize(jobCfg, Long.MAX_VALUE);
+        }
+
+        return jobCfg;
+    }
+
+    /**
+     * Runs the Hadoop job.
+     *
+     * @return {@code True} if succeeded, {@code false} otherwise.
+     * @throws Exception If failed.
+     */
+    private boolean runWordCountConfigBasedHadoopJob() throws Exception {
+        Job job = createConfigBasedHadoopJob();
+
+        // Distributed file system this job will work with.
+        FileSystem fs = FileSystem.get(job.getConfiguration());
+
+        X.println(">>> Using distributed file system: " + fs.getHomeDirectory());
+
+        // Prepare input and output job directories.
+        prepareDirectories(fs);
+
+        long time = System.currentTimeMillis();
+
+        // Run job.
+        boolean res = job.waitForCompletion(true);
+
+        X.println(">>> Job execution time: " + (System.currentTimeMillis() - time) / 1000 + " sec.");
+
+        // Move job results into local file system, so you can view calculated results.
+        publishResults(fs);
+
+        return res;
+    }
+
+    /**
+     * Prepare job's data: clean up result directories that might have been left over
+     * from previous runs, copy input files from the local file system into DFS.
+     *
+     * @param fs Distributed file system to use in job.
+     * @throws IOException If failed.
+     */
+    private void prepareDirectories(FileSystem fs) throws IOException {
+        X.println(">>> Cleaning up DFS result directory: " + RESULT_DFS_DIR);
+
+        fs.delete(RESULT_DFS_DIR, true);
+
+        X.println(">>> Cleaning up DFS input directory: " + BOOKS_DFS_DIR);
+
+        fs.delete(BOOKS_DFS_DIR, true);
+
+        X.println(">>> Copy local files into DFS input directory: " + BOOKS_DFS_DIR);
+
+        fs.copyFromLocalFile(BOOKS_LOCAL_DIR, BOOKS_DFS_DIR);
+    }
+
+    /**
+     * Publish job execution results into local file system, so you can view them.
+     *
+     * @param fs Distributed file system used in job.
+     * @throws IOException If failed.
+     */
+    private void publishResults(FileSystem fs) throws IOException {
+        X.println(">>> Cleaning up DFS input directory: " + BOOKS_DFS_DIR);
+
+        fs.delete(BOOKS_DFS_DIR, true);
+
+        X.println(">>> Cleaning up LOCAL result directory: " + RESULT_LOCAL_DIR);
+
+        fs.delete(RESULT_LOCAL_DIR, true);
+
+        X.println(">>> Moving job results into LOCAL result directory: " + RESULT_LOCAL_DIR);
+
+        fs.copyToLocalFile(true, RESULT_DFS_DIR, RESULT_LOCAL_DIR);
+    }
+
+    /**
+     * Executes a modified version of the Hadoop word count example. Here, in addition to counting the number of
+     * occurrences of each word in the source files, the N most popular words are selected.
+     *
+     * @param args None.
+     */
+    public static void main(String[] args) {
+        try {
+            new HadoopPopularWordsTest().runWordCountConfigBasedHadoopJob();
+        }
+        catch (Exception e) {
+            X.println(">>> Failed to run word count example: " + e.getMessage());
+        }
+
+        System.exit(0);
+    }
+}
\ No newline at end of file
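
The top-N selection above leans on a Guava MinMaxPriorityQueue with a reversed
comparator: once maximumSize is exceeded, the queue evicts its greatest element under
that comparator, i.e. the least frequent word. A standalone sketch of the same
behavior (hypothetical snippet, not part of this commit):

    MinMaxPriorityQueue<Integer> q = MinMaxPriorityQueue
        .orderedBy(Collections.<Integer>reverseOrder())
        .maximumSize(3)
        .create();

    for (int i = 1; i <= 10; i++)
        q.add(i);

    // The queue now holds 10, 9 and 8; removeFirst() drains in descending order,
    // which is exactly how TopNWordsReducer.cleanup() emits its results.
    while (!q.isEmpty())
        System.out.println(q.removeFirst());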

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSerializationWrapperSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSerializationWrapperSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSerializationWrapperSelfTest.java
new file mode 100644
index 0000000..789a6b3
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSerializationWrapperSelfTest.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.DataOutput;
+import java.io.DataOutputStream;
+import java.util.Arrays;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.serializer.JavaSerialization;
+import org.apache.hadoop.io.serializer.WritableSerialization;
+import org.apache.ignite.internal.processors.hadoop.v2.HadoopSerializationWrapper;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ * Test of the wrapper around the native Hadoop serialization.
+ */
+public class HadoopSerializationWrapperSelfTest extends GridCommonAbstractTest {
+    /**
+     * Tests read/write of IntWritable via native WritableSerialization.
+     * @throws Exception If fails.
+     */
+    public void testIntWritableSerialization() throws Exception {
+        HadoopSerialization ser = new HadoopSerializationWrapper(new WritableSerialization(), IntWritable.class);
+
+        ByteArrayOutputStream buf = new ByteArrayOutputStream();
+
+        DataOutput out = new DataOutputStream(buf);
+
+        ser.write(out, new IntWritable(3));
+        ser.write(out, new IntWritable(-5));
+
+        assertEquals("[0, 0, 0, 3, -1, -1, -1, -5]", Arrays.toString(buf.toByteArray()));
+
+        DataInput in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
+
+        assertEquals(3, ((IntWritable)ser.read(in, null)).get());
+        assertEquals(-5, ((IntWritable)ser.read(in, null)).get());
+    }
+
+    /**
+     * Tests read/write of Integer via native JavaSerialization.
+     * @throws Exception If fails.
+     */
+    public void testIntJavaSerialization() throws Exception {
+        HadoopSerialization ser = new HadoopSerializationWrapper(new JavaSerialization(), Integer.class);
+
+        ByteArrayOutputStream buf = new ByteArrayOutputStream();
+
+        DataOutput out = new DataOutputStream(buf);
+
+        ser.write(out, 3);
+        ser.write(out, -5);
+        ser.close();
+
+        DataInput in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
+
+        assertEquals(3, ((Integer)ser.read(in, null)).intValue());
+        assertEquals(-5, ((Integer)ser.read(in, null)).intValue());
+    }
+}
\ No newline at end of file
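
For reference, the expected byte string in the first test is just two big-endian
two's-complement ints written back to back. A quick way to reproduce it with plain
Hadoop classes (illustrative snippet, not part of this commit):

    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(bos);

    new IntWritable(3).write(dos);
    new IntWritable(-5).write(dos); // -5 == 0xFFFFFFFB, printed as signed bytes -1, -1, -1, -5.

    System.out.println(Arrays.toString(bos.toByteArray())); // [0, 0, 0, 3, -1, -1, -1, -5]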

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSharedMap.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSharedMap.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSharedMap.java
new file mode 100644
index 0000000..7552028
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSharedMap.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.util.concurrent.ConcurrentMap;
+import org.jsr166.ConcurrentHashMap8;
+
+/**
+ * Shared map for tests: keeps state visible across test class copies loaded by different class loaders.
+ */
+public class HadoopSharedMap {
+    /** */
+    private static final ConcurrentMap<String, HadoopSharedMap> maps = new ConcurrentHashMap8<>();
+
+    /** */
+    private final ConcurrentMap<String, Object> map = new ConcurrentHashMap8<>();
+
+    /**
+     * Private constructor: instances are obtained via {@link #map(Class)}.
+     */
+    private HadoopSharedMap() {
+        // No-op.
+    }
+
+    /**
+     * Puts object by key if one is not there yet.
+     *
+     * @param key Key.
+     * @param val Value.
+     * @return The mapped value: the previously mapped one if it existed, the given one otherwise.
+     */
+    public <T> T put(String key, T val) {
+        Object old = map.putIfAbsent(key, val);
+
+        return old == null ? val : (T)old;
+    }
+
+    /**
+     * @param cls Class.
+     * @return Map of static fields.
+     */
+    public static HadoopSharedMap map(Class<?> cls) {
+        HadoopSharedMap m = maps.get(cls.getName());
+
+        if (m != null)
+            return m;
+
+        HadoopSharedMap old = maps.putIfAbsent(cls.getName(), m = new HadoopSharedMap());
+
+        return old == null ? m : old;
+    }
+}
\ No newline at end of file
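
The intended usage pattern (visible in HadoopTaskExecutionSelfTest later in this
commit) is to route would-be static test state through the shared map, so that copies
of a test class loaded by different HadoopClassLoader instances all observe the same
objects. A sketch, with MyTest as a hypothetical test class:

    /** */
    private static HadoopSharedMap m = HadoopSharedMap.map(MyTest.class);

    /** Counter shared across class loaders. */
    private static final AtomicInteger cnt = m.put("cnt", new AtomicInteger());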

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSnappyFullMapReduceTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSnappyFullMapReduceTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSnappyFullMapReduceTest.java
new file mode 100644
index 0000000..27a5fcd
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSnappyFullMapReduceTest.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+/**
+ * Same test as HadoopMapReduceTest, but with Snappy output compression enabled.
+ */
+public class HadoopSnappyFullMapReduceTest extends HadoopMapReduceTest {
+    /** {@inheritDoc} */
+    @Override protected boolean compressOutputSnappy() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean[][] getApiModes() {
+        return new boolean[][] {
+            { false, false, true },
+            { true, true, true },
+        };
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSnappyTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSnappyTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSnappyTest.java
new file mode 100644
index 0000000..b4e3dc2
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSnappyTest.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.util.Arrays;
+import java.util.concurrent.ThreadLocalRandom;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.compress.CompressionInputStream;
+import org.apache.hadoop.io.compress.CompressionOutputStream;
+import org.apache.hadoop.io.compress.SnappyCodec;
+import org.apache.hadoop.io.compress.snappy.SnappyCompressor;
+import org.apache.hadoop.util.NativeCodeLoader;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ * Tests isolated Hadoop Snappy codec usage.
+ */
+public class HadoopSnappyTest extends GridCommonAbstractTest {
+    /** Length of data. */
+    private static final int BYTE_SIZE = 1024 * 50;
+
+    /**
+     * Checks Snappy codec usage.
+     *
+     * @throws Throwable On error.
+     */
+    public void testSnappy() throws Throwable {
+        // Run Snappy test in default class loader:
+        checkSnappy();
+
+        // Run the same in several more class loaders simulating jobs and tasks:
+        for (int i = 0; i < 2; i++) {
+            ClassLoader hadoopClsLdr = new HadoopClassLoader(null, "cl-" + i, null);
+
+            Class<?> cls = (Class)Class.forName(HadoopSnappyTest.class.getName(), true, hadoopClsLdr);
+
+            assertEquals(hadoopClsLdr, cls.getClassLoader());
+
+            U.invoke(cls, null, "checkSnappy");
+        }
+    }
+
+    /**
+     * Internal check routine.
+     *
+     * @throws Throwable If failed.
+     */
+    public static void checkSnappy() throws Throwable {
+        try {
+            byte[] expBytes = new byte[BYTE_SIZE];
+            byte[] actualBytes = new byte[BYTE_SIZE];
+
+            for (int i = 0; i < expBytes.length; i++)
+                expBytes[i] = (byte)ThreadLocalRandom.current().nextInt(16);
+
+            SnappyCodec codec = new SnappyCodec();
+
+            codec.setConf(new Configuration());
+
+            ByteArrayOutputStream baos = new ByteArrayOutputStream();
+
+            try (CompressionOutputStream cos = codec.createOutputStream(baos)) {
+                cos.write(expBytes);
+                cos.flush();
+            }
+
+            try (CompressionInputStream cis = codec.createInputStream(new ByteArrayInputStream(baos.toByteArray()))) {
+                int read = cis.read(actualBytes, 0, actualBytes.length);
+
+                assert read == actualBytes.length;
+            }
+
+            assert Arrays.equals(expBytes, actualBytes);
+        }
+        catch (Throwable e) {
+            System.out.println("Snappy check failed:");
+            System.out.println("### NativeCodeLoader.isNativeCodeLoaded:  " + NativeCodeLoader.isNativeCodeLoaded());
+            System.out.println("### SnappyCompressor.isNativeCodeLoaded:  " + SnappyCompressor.isNativeCodeLoaded());
+
+            throw e;
+        }
+    }
+}
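
One caveat in checkSnappy(): a single InputStream.read call may legally return fewer
bytes than requested, so the "read == actualBytes.length" assertion could in principle
trip on a short read. A stricter variant loops until the buffer is full (a sketch,
reusing the cis stream from the test):

    int off = 0;

    while (off < actualBytes.length) {
        int n = cis.read(actualBytes, off, actualBytes.length - off);

        if (n < 0)
            break; // Unexpected EOF; the array equality assertion below will catch it.

        off += n;
    }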

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSortingExternalTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSortingExternalTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSortingExternalTest.java
new file mode 100644
index 0000000..dff5e70
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSortingExternalTest.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import org.apache.ignite.configuration.HadoopConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.marshaller.jdk.JdkMarshaller;
+
+/**
+ * External test for sorting.
+ */
+public class HadoopSortingExternalTest extends HadoopSortingTest {
+    /** {@inheritDoc} */
+    @Override public HadoopConfiguration hadoopConfiguration(String gridName) {
+        HadoopConfiguration cfg = super.hadoopConfiguration(gridName);
+
+        // TODO: IGNITE-404: Uncomment when fixed.
+        //cfg.setExternalExecution(true);
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        cfg.setMarshaller(new JdkMarshaller());
+
+        return cfg;
+    }
+}
\ No newline at end of file
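
The marshaller swap in isolation (a minimal sketch; the rationale is an assumption,
since the commit itself does not explain it): the JDK marshaller handles arbitrary
Serializable state without prior type registration, a conservative choice for
external process execution once IGNITE-404 re-enables it.

    IgniteConfiguration cfg = new IgniteConfiguration();

    cfg.setMarshaller(new JdkMarshaller());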

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSortingTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSortingTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSortingTest.java
new file mode 100644
index 0000000..20f5eef
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSortingTest.java
@@ -0,0 +1,303 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Scanner;
+import java.util.UUID;
+import org.apache.hadoop.fs.AbstractFileSystem;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.serializer.JavaSerialization;
+import org.apache.hadoop.io.serializer.JavaSerializationComparator;
+import org.apache.hadoop.io.serializer.WritableSerialization;
+import org.apache.hadoop.mapreduce.InputFormat;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.ignite.configuration.HadoopConfiguration;
+import org.apache.ignite.internal.util.typedef.X;
+
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.createJobInfo;
+
+/**
+ * Tests correct sorting.
+ */
+public class HadoopSortingTest extends HadoopAbstractSelfTest {
+    /** */
+    private static final String PATH_INPUT = "/test-in";
+
+    /** */
+    private static final String PATH_OUTPUT = "/test-out";
+
+    /** {@inheritDoc} */
+    @Override protected int gridCount() {
+        return 3;
+    }
+
+    /**
+     * @return {@code True} if IGFS is enabled on Hadoop nodes.
+     */
+    @Override protected boolean igfsEnabled() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        startGrids(gridCount());
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids(true);
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopConfiguration hadoopConfiguration(String gridName) {
+        HadoopConfiguration cfg = super.hadoopConfiguration(gridName);
+
+        // TODO: IGNITE-404: Uncomment when fixed.
+        //cfg.setExternalExecution(false);
+
+        return cfg;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSortSimple() throws Exception {
+        // Generate test data.
+        Job job = Job.getInstance();
+
+        job.setInputFormatClass(InFormat.class);
+
+        job.setOutputKeyClass(Text.class);
+        job.setOutputValueClass(NullWritable.class);
+
+        job.setMapperClass(Mapper.class);
+        job.setNumReduceTasks(0);
+
+        setupFileSystems(job.getConfiguration());
+
+        FileOutputFormat.setOutputPath(job, new Path(igfsScheme() + PATH_INPUT));
+
+        X.printerrln("Data generation started.");
+
+        grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 1),
+            createJobInfo(job.getConfiguration())).get(180000);
+
+        X.printerrln("Data generation complete.");
+
+        // Run main map-reduce job.
+        job = Job.getInstance();
+
+        setupFileSystems(job.getConfiguration());
+
+        job.getConfiguration().set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, JavaSerialization.class.getName() +
+            "," + WritableSerialization.class.getName());
+
+        FileInputFormat.setInputPaths(job, new Path(igfsScheme() + PATH_INPUT));
+        FileOutputFormat.setOutputPath(job, new Path(igfsScheme() + PATH_OUTPUT));
+
+        job.setSortComparatorClass(JavaSerializationComparator.class);
+
+        job.setMapperClass(MyMapper.class);
+        job.setReducerClass(MyReducer.class);
+
+        job.setNumReduceTasks(2);
+
+        job.setMapOutputKeyClass(UUID.class);
+        job.setMapOutputValueClass(NullWritable.class);
+
+        job.setOutputKeyClass(Text.class);
+        job.setOutputValueClass(NullWritable.class);
+
+        X.printerrln("Job started.");
+
+        grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 2),
+            createJobInfo(job.getConfiguration())).get(180000);
+
+        X.printerrln("Job complete.");
+
+        // Check result.
+        Path outDir = new Path(igfsScheme() + PATH_OUTPUT);
+
+        AbstractFileSystem fs = AbstractFileSystem.get(new URI(igfsScheme()), job.getConfiguration());
+
+        for (FileStatus file : fs.listStatus(outDir)) {
+            X.printerrln("__ file: " + file);
+
+            if (file.getLen() == 0)
+                continue;
+
+            FSDataInputStream in = fs.open(file.getPath());
+
+            Scanner sc = new Scanner(in);
+
+            UUID prev = null;
+
+            while (sc.hasNextLine()) {
+                UUID next = UUID.fromString(sc.nextLine());
+
+//                X.printerrln("___ check: " + next);
+
+                if (prev != null)
+                    assertTrue(prev.compareTo(next) < 0);
+
+                prev = next;
+            }
+        }
+    }
+
+    /** Input format that generates a fixed number of fake splits with random UUID lines. */
+    public static class InFormat extends InputFormat<Text, NullWritable> {
+        /** {@inheritDoc} */
+        @Override public List<InputSplit> getSplits(JobContext ctx) throws IOException, InterruptedException {
+            List<InputSplit> res = new ArrayList<>();
+
+            FakeSplit split = new FakeSplit(20);
+
+            for (int i = 0; i < 10; i++)
+                res.add(split);
+
+            return res;
+        }
+
+        /** {@inheritDoc} */
+        @Override public RecordReader<Text, NullWritable> createRecordReader(final InputSplit split,
+            TaskAttemptContext ctx) throws IOException, InterruptedException {
+            return new RecordReader<Text, NullWritable>() {
+                /** */
+                int cnt;
+
+                /** */
+                Text txt = new Text();
+
+                @Override public void initialize(InputSplit split, TaskAttemptContext ctx) {
+                    // No-op.
+                }
+
+                @Override public boolean nextKeyValue() throws IOException, InterruptedException {
+                    return ++cnt <= split.getLength();
+                }
+
+                @Override public Text getCurrentKey() {
+                    txt.set(UUID.randomUUID().toString());
+
+//                    X.printerrln("___ read: " + txt);
+
+                    return txt;
+                }
+
+                @Override public NullWritable getCurrentValue() {
+                    return NullWritable.get();
+                }
+
+                @Override public float getProgress() throws IOException, InterruptedException {
+                    return (float)cnt / split.getLength();
+                }
+
+                @Override public void close() {
+                    // No-op.
+                }
+            };
+        }
+    }
+
+    /** Mapper that parses each input line as a UUID and emits it as the output key. */
+    public static class MyMapper extends Mapper<LongWritable, Text, UUID, NullWritable> {
+        /** {@inheritDoc} */
+        @Override protected void map(LongWritable key, Text val, Context ctx) throws IOException, InterruptedException {
+//            X.printerrln("___ map: " + val);
+
+            ctx.write(UUID.fromString(val.toString()), NullWritable.get());
+        }
+    }
+
+    /** Reducer that writes each UUID key back out as text. */
+    public static class MyReducer extends Reducer<UUID, NullWritable, Text, NullWritable> {
+        /** */
+        private Text text = new Text();
+
+        /** {@inheritDoc} */
+        @Override protected void reduce(UUID key, Iterable<NullWritable> vals, Context ctx)
+            throws IOException, InterruptedException {
+//            X.printerrln("___ rdc: " + key);
+
+            text.set(key.toString());
+
+            ctx.write(text, NullWritable.get());
+        }
+    }
+
+    /** Fake input split of a given length, always reported as local to 127.0.0.1. */
+    public static class FakeSplit extends InputSplit implements Writable {
+        /** */
+        private static final String[] HOSTS = {"127.0.0.1"};
+
+        /** */
+        private int len;
+
+        /**
+         * @param len Length.
+         */
+        public FakeSplit(int len) {
+            this.len = len;
+        }
+
+        /**
+         * Default constructor, required for {@link Writable} deserialization.
+         */
+        public FakeSplit() {
+            // No-op.
+        }
+
+        /** {@inheritDoc} */
+        @Override public long getLength() throws IOException, InterruptedException {
+            return len;
+        }
+
+        /** {@inheritDoc} */
+        @Override public String[] getLocations() throws IOException, InterruptedException {
+            return HOSTS;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void write(DataOutput out) throws IOException {
+            out.writeInt(len);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void readFields(DataInput in) throws IOException {
+            len = in.readInt();
+        }
+    }
+}
\ No newline at end of file
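
The pivotal configuration move in testSortSimple() is registering two serialization
frameworks side by side, so the UUID map-output keys go through Java serialization
while Text values stay on Writable serialization. The equivalent raw-property form
("io.serializations" is the standard Hadoop key behind IO_SERIALIZATIONS_KEY):

    Configuration conf = new Configuration();

    conf.set("io.serializations",
        JavaSerialization.class.getName() + "," + WritableSerialization.class.getName());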

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSplitWrapperSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSplitWrapperSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSplitWrapperSelfTest.java
new file mode 100644
index 0000000..11c3907
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSplitWrapperSelfTest.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.ObjectInput;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutput;
+import java.io.ObjectOutputStream;
+import java.util.Arrays;
+import java.util.concurrent.Callable;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.lib.input.FileSplit;
+import org.apache.ignite.internal.processors.hadoop.v2.HadoopSplitWrapper;
+import org.apache.ignite.testframework.GridTestUtils;
+
+/**
+ * Self test of {@link org.apache.ignite.internal.processors.hadoop.v2.HadoopSplitWrapper}.
+ */
+public class HadoopSplitWrapperSelfTest extends HadoopAbstractSelfTest {
+    /**
+     * Tests serialization of the wrapper and the wrapped native split.
+     * @throws Exception If fails.
+     */
+    public void testSerialization() throws Exception {
+        FileSplit nativeSplit = new FileSplit(new Path("/path/to/file"), 100, 500, new String[]{"host1", "host2"});
+
+        assertEquals("/path/to/file:100+500", nativeSplit.toString());
+
+        HadoopSplitWrapper split = HadoopUtils.wrapSplit(10, nativeSplit, nativeSplit.getLocations());
+
+        assertEquals("[host1, host2]", Arrays.toString(split.hosts()));
+
+        ByteArrayOutputStream buf = new ByteArrayOutputStream();
+
+        ObjectOutput out = new ObjectOutputStream(buf);
+
+        out.writeObject(split);
+
+        ObjectInput in = new ObjectInputStream(new ByteArrayInputStream(buf.toByteArray()));
+
+        final HadoopSplitWrapper res = (HadoopSplitWrapper)in.readObject();
+
+        assertEquals("/path/to/file:100+500", HadoopUtils.unwrapSplit(res).toString());
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                res.hosts();
+
+                return null;
+            }
+        }, AssertionError.class, null);
+    }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopStartup.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopStartup.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopStartup.java
new file mode 100644
index 0000000..820a1f3
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopStartup.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem;
+import org.apache.ignite.internal.util.typedef.G;
+
+/**
+ * Hadoop node startup.
+ */
+public class HadoopStartup {
+    /**
+     * @param args Arguments.
+     */
+    public static void main(String[] args) {
+        G.start("config/hadoop/default-config.xml");
+    }
+
+    /**
+     * @return Configuration for job run.
+     */
+    @SuppressWarnings("UnnecessaryFullyQualifiedName")
+    public static Configuration configuration() {
+        Configuration cfg = new Configuration();
+
+        cfg.set("fs.defaultFS", "igfs://igfs@localhost");
+
+        cfg.set("fs.igfs.impl", org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.class.getName());
+        cfg.set("fs.AbstractFileSystem.igfs.impl", IgniteHadoopFileSystem.class.getName());
+
+        cfg.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
+
+        cfg.set("mapreduce.framework.name", "ignite");
+        cfg.set("mapreduce.jobtracker.address", "localhost:11211");
+
+        return cfg;
+    }
+}
\ No newline at end of file
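
A hypothetical client-side counterpart to the helper above: start a node via main(),
then submit any standard MapReduce job through the Ignite job tracker (the job class
and paths below are illustrative):

    Job job = Job.getInstance(HadoopStartup.configuration());

    job.setJarByClass(WordCount.class);

    FileInputFormat.setInputPaths(job, new Path("igfs://igfs@localhost/input"));
    FileOutputFormat.setOutputPath(job, new Path("igfs://igfs@localhost/output"));

    boolean ok = job.waitForCompletion(true);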

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskExecutionSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskExecutionSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskExecutionSelfTest.java
new file mode 100644
index 0000000..431433e
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskExecutionSelfTest.java
@@ -0,0 +1,567 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteFileSystem;
+import org.apache.ignite.configuration.FileSystemConfiguration;
+import org.apache.ignite.configuration.HadoopConfiguration;
+import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
+import org.apache.ignite.igfs.IgfsPath;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.util.lang.GridAbsPredicate;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.testframework.GridTestUtils;
+
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.createJobInfo;
+
+/**
+ * Tests map-reduce task execution basics.
+ */
+public class HadoopTaskExecutionSelfTest extends HadoopAbstractSelfTest {
+    /** */
+    private static HadoopSharedMap m = HadoopSharedMap.map(HadoopTaskExecutionSelfTest.class);
+
+    /** Line count. */
+    private static final AtomicInteger totalLineCnt = m.put("totalLineCnt", new AtomicInteger());
+
+    /** Executed tasks. */
+    private static final AtomicInteger executedTasks = m.put("executedTasks", new AtomicInteger());
+
+    /** Cancelled tasks. */
+    private static final AtomicInteger cancelledTasks = m.put("cancelledTasks", new AtomicInteger());
+
+    /** Working directory of each task. */
+    private static final Map<String, String> taskWorkDirs = m.put("taskWorkDirs",
+        new ConcurrentHashMap<String, String>());
+
+    /** Mapper id to fail. */
+    private static final AtomicInteger failMapperId = m.put("failMapperId", new AtomicInteger());
+
+    /** Number of splits of the current input. */
+    private static final AtomicInteger splitsCount = m.put("splitsCount", new AtomicInteger());
+
+    /** Test param. */
+    private static final String MAP_WRITE = "test.map.write";
+
+    /** {@inheritDoc} */
+    @Override public FileSystemConfiguration igfsConfiguration() throws Exception {
+        FileSystemConfiguration cfg = super.igfsConfiguration();
+
+        cfg.setFragmentizerEnabled(false);
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean igfsEnabled() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        super.beforeTestsStarted();
+
+        startGrids(gridCount());
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        stopAllGrids();
+
+        super.afterTestsStopped();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        grid(0).fileSystem(igfsName).format();
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopConfiguration hadoopConfiguration(String gridName) {
+        HadoopConfiguration cfg = super.hadoopConfiguration(gridName);
+
+        cfg.setMaxParallelTasks(5);
+
+        // TODO: IGNITE-404: Uncomment when fixed.
+        //cfg.setExternalExecution(false);
+
+        return cfg;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testMapRun() throws Exception {
+        int lineCnt = 10000;
+        String fileName = "/testFile";
+
+        prepareFile(fileName, lineCnt);
+
+        totalLineCnt.set(0);
+        taskWorkDirs.clear();
+
+        Configuration cfg = new Configuration();
+
+        cfg.setStrings("fs.igfs.impl", IgniteHadoopFileSystem.class.getName());
+
+        Job job = Job.getInstance(cfg);
+        job.setOutputKeyClass(Text.class);
+        job.setOutputValueClass(IntWritable.class);
+
+        job.setMapperClass(TestMapper.class);
+
+        job.setNumReduceTasks(0);
+
+        job.setInputFormatClass(TextInputFormat.class);
+
+        FileInputFormat.setInputPaths(job, new Path("igfs://:" + getTestGridName(0) + "@/"));
+        FileOutputFormat.setOutputPath(job, new Path("igfs://:" + getTestGridName(0) + "@/output/"));
+
+        job.setJarByClass(getClass());
+
+        IgniteInternalFuture<?> fut = grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 1),
+                createJobInfo(job.getConfiguration()));
+
+        fut.get();
+
+        assertEquals(lineCnt, totalLineCnt.get());
+
+        assertEquals(32, taskWorkDirs.size());
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testMapCombineRun() throws Exception {
+        int lineCnt = 10001;
+        String fileName = "/testFile";
+
+        prepareFile(fileName, lineCnt);
+
+        totalLineCnt.set(0);
+        taskWorkDirs.clear();
+
+        Configuration cfg = new Configuration();
+
+        cfg.setStrings("fs.igfs.impl", IgniteHadoopFileSystem.class.getName());
+        cfg.setBoolean(MAP_WRITE, true);
+
+        Job job = Job.getInstance(cfg);
+        job.setOutputKeyClass(Text.class);
+        job.setOutputValueClass(IntWritable.class);
+
+        job.setMapperClass(TestMapper.class);
+        job.setCombinerClass(TestCombiner.class);
+        job.setReducerClass(TestReducer.class);
+
+        job.setNumReduceTasks(2);
+
+        job.setInputFormatClass(TextInputFormat.class);
+
+        FileInputFormat.setInputPaths(job, new Path("igfs://:" + getTestGridName(0) + "@/"));
+        FileOutputFormat.setOutputPath(job, new Path("igfs://:" + getTestGridName(0) + "@/output"));
+
+        job.setJarByClass(getClass());
+
+        HadoopJobId jobId = new HadoopJobId(UUID.randomUUID(), 2);
+
+        IgniteInternalFuture<?> fut = grid(0).hadoop().submit(jobId, createJobInfo(job.getConfiguration()));
+
+        fut.get();
+
+        assertEquals(lineCnt, totalLineCnt.get());
+
+        assertEquals(34, taskWorkDirs.size());
+
+        for (int g = 0; g < gridCount(); g++)
+            grid(g).hadoop().finishFuture(jobId).get();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testMapperException() throws Exception {
+        prepareFile("/testFile", 1000);
+
+        Configuration cfg = new Configuration();
+
+        cfg.setStrings("fs.igfs.impl", IgniteHadoopFileSystem.class.getName());
+
+        Job job = Job.getInstance(cfg);
+        job.setOutputKeyClass(Text.class);
+        job.setOutputValueClass(IntWritable.class);
+
+        job.setMapperClass(FailMapper.class);
+
+        job.setNumReduceTasks(0);
+
+        job.setInputFormatClass(TextInputFormat.class);
+
+        FileInputFormat.setInputPaths(job, new Path("igfs://:" + getTestGridName(0) + "@/"));
+        FileOutputFormat.setOutputPath(job, new Path("igfs://:" + getTestGridName(0) + "@/output/"));
+
+        job.setJarByClass(getClass());
+
+        final IgniteInternalFuture<?> fut = grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 3),
+                createJobInfo(job.getConfiguration()));
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                fut.get();
+
+                return null;
+            }
+        }, IgniteCheckedException.class, null);
+    }
+
+    /**
+     * @param fileName File name.
+     * @param lineCnt Line count.
+     * @throws Exception If failed.
+     */
+    private void prepareFile(String fileName, int lineCnt) throws Exception {
+        IgniteFileSystem igfs = grid(0).fileSystem(igfsName);
+
+        try (OutputStream os = igfs.create(new IgfsPath(fileName), true)) {
+            PrintWriter w = new PrintWriter(new OutputStreamWriter(os));
+
+            for (int i = 0; i < lineCnt; i++)
+                w.print("Hello, Hadoop map-reduce!\n");
+
+            w.flush();
+        }
+    }
+
+    /**
+     * Prepares a job with mappers to cancel.
+     * @return Configuration of the prepared job.
+     * @throws Exception If fails.
+     */
+    private Configuration prepareJobForCancelling() throws Exception {
+        prepareFile("/testFile", 1500);
+
+        executedTasks.set(0);
+        cancelledTasks.set(0);
+        failMapperId.set(0);
+        splitsCount.set(0);
+
+        Configuration cfg = new Configuration();
+
+        setupFileSystems(cfg);
+
+        Job job = Job.getInstance(cfg);
+        job.setOutputKeyClass(Text.class);
+        job.setOutputValueClass(IntWritable.class);
+
+        job.setMapperClass(CancellingTestMapper.class);
+
+        job.setNumReduceTasks(0);
+
+        job.setInputFormatClass(InFormat.class);
+
+        FileInputFormat.setInputPaths(job, new Path("igfs://:" + getTestGridName(0) + "@/"));
+        FileOutputFormat.setOutputPath(job, new Path("igfs://:" + getTestGridName(0) + "@/output/"));
+
+        job.setJarByClass(getClass());
+
+        return job.getConfiguration();
+    }
+
+    /**
+     * Test input format.
+     */
+    private static class InFormat extends TextInputFormat {
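+        /** {@inheritDoc} */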
+        @Override public List<InputSplit> getSplits(JobContext ctx) throws IOException {
+            List<InputSplit> res = super.getSplits(ctx);
+
+            splitsCount.set(res.size());
+
+            X.println("___ split of input: " + splitsCount.get());
+
+            return res;
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testTaskCancelling() throws Exception {
+        Configuration cfg = prepareJobForCancelling();
+
+        HadoopJobId jobId = new HadoopJobId(UUID.randomUUID(), 1);
+
+        final IgniteInternalFuture<?> fut = grid(0).hadoop().submit(jobId, createJobInfo(cfg));
+
+        if (!GridTestUtils.waitForCondition(new GridAbsPredicate() {
+            @Override public boolean apply() {
+                return splitsCount.get() > 0;
+            }
+        }, 20000)) {
+            U.dumpThreads(log);
+
+            fail();
+        }
+
+        if (!GridTestUtils.waitForCondition(new GridAbsPredicate() {
+            @Override public boolean apply() {
+                return executedTasks.get() == splitsCount.get();
+            }
+        }, 20000)) {
+            U.dumpThreads(log);
+
+            fail();
+        }
+
+        // Fail mapper with id "1", cancels others
+        failMapperId.set(1);
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                fut.get();
+
+                return null;
+            }
+        }, IgniteCheckedException.class, null);
+
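+        // All executed tasks except the failed one must have been cancelled.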
+        assertEquals(executedTasks.get(), cancelledTasks.get() + 1);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testJobKill() throws Exception {
+        Configuration cfg = prepareJobForCancelling();
+
+        Hadoop hadoop = grid(0).hadoop();
+
+        HadoopJobId jobId = new HadoopJobId(UUID.randomUUID(), 1);
+
+        // Kill an unknown job.
+        boolean killRes = hadoop.kill(jobId);
+
+        assertFalse(killRes);
+
+        final IgniteInternalFuture<?> fut = hadoop.submit(jobId, createJobInfo(cfg));
+
+        if (!GridTestUtils.waitForCondition(new GridAbsPredicate() {
+            @Override public boolean apply() {
+                return splitsCount.get() > 0;
+            }
+        }, 20000)) {
+            U.dumpThreads(log);
+
+            fail();
+        }
+
+        if (!GridTestUtils.waitForCondition(new GridAbsPredicate() {
+            @Override public boolean apply() {
+                X.println("___ executed tasks: " + executedTasks.get());
+
+                return executedTasks.get() == splitsCount.get();
+            }
+        }, 20000)) {
+            U.dumpThreads(log);
+
+            fail();
+        }
+
+        // Kill the job while it is actually running.
+        killRes = hadoop.kill(jobId);
+
+        assertTrue(killRes);
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                fut.get();
+
+                return null;
+            }
+        }, IgniteCheckedException.class, null);
+
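+        // Every executed task must have been cancelled by the kill.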
+        assertEquals(executedTasks.get(), cancelledTasks.get());
+
+        // Kill the same job again: it has already finished.
+        killRes = hadoop.kill(jobId);
+
+        assertFalse(killRes);
+    }
+
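+    /**
+     * Test mapper: fails the task whose id matches {@code failMapperId} and counts cancellations of the rest.
+     */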
+    private static class CancellingTestMapper extends Mapper<Object, Text, Text, IntWritable> {
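+        /** Sequential id of this mapper, assigned in {@code setup()}. */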
+        private int mapperId;
+
+        /** {@inheritDoc} */
+        @Override protected void setup(Context ctx) throws IOException, InterruptedException {
+            mapperId = executedTasks.incrementAndGet();
+        }
+
+        /** {@inheritDoc} */
+        @Override public void run(Context ctx) throws IOException, InterruptedException {
+            try {
+                super.run(ctx);
+            }
+            catch (HadoopTaskCancelledException e) {
+                cancelledTasks.incrementAndGet();
+
+                throw e;
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void map(Object key, Text val, Context ctx) throws IOException, InterruptedException {
+            if (mapperId == failMapperId.get())
+                throw new IOException();
+
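+            // Keep the task busy long enough for the cancellation to arrive.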
+            Thread.sleep(1000);
+        }
+    }
+
+    /**
+     * Test failing mapper.
+     */
+    private static class FailMapper extends Mapper<Object, Text, Text, IntWritable> {
+        /** {@inheritDoc} */
+        @Override protected void map(Object key, Text val, Context ctx) throws IOException, InterruptedException {
+            throw new IOException("Expected");
+        }
+    }
+
+    /**
+     * Mapper calculates number of lines.
+     */
+    private static class TestMapper extends Mapper<Object, Text, Text, IntWritable> {
+        /** Writable integer constant of '1'. */
+        private static final IntWritable ONE = new IntWritable(1);
+
+        /** Line count constant. */
+        public static final Text LINE_COUNT = new Text("lineCount");
+
+        /** {@inheritDoc} */
+        @Override protected void setup(Context ctx) throws IOException, InterruptedException {
+            X.println("___ Mapper: " + ctx.getTaskAttemptID());
+
+            String taskId = ctx.getTaskAttemptID().toString();
+
+            LocalFileSystem locFs = FileSystem.getLocal(ctx.getConfiguration());
+
+            String workDir = locFs.getWorkingDirectory().toString();
+
+            assertNull(taskWorkDirs.put(workDir, taskId));
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void map(Object key, Text val, Context ctx) throws IOException, InterruptedException {
+            if (ctx.getConfiguration().getBoolean(MAP_WRITE, false))
+                ctx.write(LINE_COUNT, ONE);
+            else
+                totalLineCnt.incrementAndGet();
+        }
+    }
+
+    /**
+     * Combiner calculates number of lines.
+     */
+    private static class TestCombiner extends Reducer<Text, IntWritable, Text, IntWritable> {
+        /** Reusable output value holding the accumulated line count. */
+        IntWritable sum = new IntWritable();
+
+        /** {@inheritDoc} */
+        @Override protected void setup(Context ctx) throws IOException, InterruptedException {
+            X.println("___ Combiner: ");
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void reduce(Text key, Iterable<IntWritable> values, Context ctx) throws IOException,
+            InterruptedException {
+            int lineCnt = 0;
+
+            for (IntWritable value : values)
+                lineCnt += value.get();
+
+            sum.set(lineCnt);
+
+            X.println("___ combo: " + lineCnt);
+
+            ctx.write(key, sum);
+        }
+    }
+
+    /**
+     * Reducer calculates number of lines.
+     */
+    private static class TestReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
+        /** Reusable output value holding the accumulated line count. */
+        IntWritable sum = new IntWritable();
+
+        /** {@inheritDoc} */
+        @Override protected void setup(Context ctx) throws IOException, InterruptedException {
+            X.println("___ Reducer: " + ctx.getTaskAttemptID());
+
+            String taskId = ctx.getTaskAttemptID().toString();
+            String workDir = FileSystem.getLocal(ctx.getConfiguration()).getWorkingDirectory().toString();
+
+            assertNull(taskWorkDirs.put(workDir, taskId));
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void reduce(Text key, Iterable<IntWritable> values, Context ctx) throws IOException,
+            InterruptedException {
+            int lineCnt = 0;
+
+            for (IntWritable value : values) {
+                lineCnt += value.get();
+
+                X.println("___ rdcr: " + value.get());
+            }
+
+            sum.set(lineCnt);
+
+            ctx.write(key, sum);
+
+            X.println("___ RDCR SUM: " + lineCnt);
+
+            totalLineCnt.addAndGet(lineCnt);
+        }
+    }
+}
\ No newline at end of file

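For reference, the submit/kill/cancel scenarios above reduce to a small client-side
pattern on the Ignite Hadoop facade. A minimal sketch of that pattern, assuming a
started grid with the Hadoop module and an already prepared job Configuration `cfg`
(names follow the test above):

    Hadoop hadoop = grid(0).hadoop();

    HadoopJobId jobId = new HadoopJobId(UUID.randomUUID(), 1);

    // submit() returns a future that completes when the job finishes.
    IgniteInternalFuture<?> fut = hadoop.submit(jobId, createJobInfo(cfg));

    // kill() returns true only for a known, still-running job.
    boolean killed = hadoop.kill(jobId);

    fut.get(); // Throws IgniteCheckedException if the job failed or was killed.
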
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksAllVersionsTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksAllVersionsTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksAllVersionsTest.java
new file mode 100644
index 0000000..7c6d244
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksAllVersionsTest.java
@@ -0,0 +1,260 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import com.google.common.base.Joiner;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.igfs.IgfsPath;
+import org.apache.ignite.internal.processors.hadoop.examples.HadoopWordCount2;
+
+/**
+ * Tests of Map, Combine and Reduce task executions for any version of the Hadoop API.
+ */
+abstract class HadoopTasksAllVersionsTest extends HadoopAbstractWordCountTest {
+    /** Empty hosts array. */
+    private static final String[] HOSTS = new String[0];
+
+    /**
+     * Creates a Hadoop job. Override this method to create tests for a particular job implementation.
+     *
+     * @param inFile Input file name for the job.
+     * @param outFile Output file name for the job.
+     * @return Hadoop job.
+     * @throws IOException If fails.
+     */
+    public abstract HadoopJob getHadoopJob(String inFile, String outFile) throws Exception;
+
+    /**
+     * @return Prefix of the reducer output file name: "part-" for the v1 API and "part-r-" for the v2 API.
+     */
+    public abstract String getOutputFileNamePrefix();
+
+    /**
+     * Tests map task execution.
+     *
+     * @throws Exception If fails.
+     */
+    @SuppressWarnings("ConstantConditions")
+    public void testMapTask() throws Exception {
+        IgfsPath inDir = new IgfsPath(PATH_INPUT);
+
+        igfs.mkdirs(inDir);
+
+        IgfsPath inFile = new IgfsPath(inDir, HadoopWordCount2.class.getSimpleName() + "-input");
+
+        URI inFileUri = URI.create(igfsScheme() + inFile.toString());
+
+        try (PrintWriter pw = new PrintWriter(igfs.create(inFile, true))) {
+            pw.println("hello0 world0");
+            pw.println("world1 hello1");
+        }
+
+        HadoopFileBlock fileBlock1 = new HadoopFileBlock(HOSTS, inFileUri, 0, igfs.info(inFile).length() - 1);
+
+        try (PrintWriter pw = new PrintWriter(igfs.append(inFile, false))) {
+            pw.println("hello2 world2");
+            pw.println("world3 hello3");
+        }
+        HadoopFileBlock fileBlock2 = new HadoopFileBlock(HOSTS, inFileUri, fileBlock1.length(),
+                igfs.info(inFile).length() - fileBlock1.length());
+
+        HadoopJob gridJob = getHadoopJob(igfsScheme() + inFile.toString(), igfsScheme() + PATH_OUTPUT);
+
+        HadoopTaskInfo taskInfo = new HadoopTaskInfo(HadoopTaskType.MAP, gridJob.id(), 0, 0, fileBlock1);
+
+        HadoopTestTaskContext ctx = new HadoopTestTaskContext(taskInfo, gridJob);
+
+        ctx.mockOutput().clear();
+
+        ctx.run();
+
+        assertEquals("hello0,1; world0,1; world1,1; hello1,1", Joiner.on("; ").join(ctx.mockOutput()));
+
+        ctx.mockOutput().clear();
+
+        ctx.taskInfo(new HadoopTaskInfo(HadoopTaskType.MAP, gridJob.id(), 0, 0, fileBlock2));
+
+        ctx.run();
+
+        assertEquals("hello2,1; world2,1; world3,1; hello3,1", Joiner.on("; ").join(ctx.mockOutput()));
+    }
+
+    /**
+     * Generates input data for a reduce-like operation into the mock context input and runs the operation.
+     *
+     * @param gridJob Job to create the reduce task from.
+     * @param taskType Type of task: combine or reduce.
+     * @param taskNum Task number within the job.
+     * @param words Pairs of words and their counts.
+     * @return Context with mock output.
+     * @throws IgniteCheckedException If fails.
+     */
+    private HadoopTestTaskContext runTaskWithInput(HadoopJob gridJob, HadoopTaskType taskType,
+        int taskNum, String... words) throws IgniteCheckedException {
+        HadoopTaskInfo taskInfo = new HadoopTaskInfo(taskType, gridJob.id(), taskNum, 0, null);
+
+        HadoopTestTaskContext ctx = new HadoopTestTaskContext(taskInfo, gridJob);
+
+        for (int i = 0; i < words.length; i += 2) {
+            List<IntWritable> valList = new ArrayList<>();
+
+            for (int j = 0; j < Integer.parseInt(words[i + 1]); j++)
+                valList.add(new IntWritable(1));
+
+            ctx.mockInput().put(new Text(words[i]), valList);
+        }
+
+        ctx.run();
+
+        return ctx;
+    }
+
+    /**
+     * Tests reduce task execution.
+     *
+     * @throws Exception If fails.
+     */
+    public void testReduceTask() throws Exception {
+        HadoopJob gridJob = getHadoopJob(igfsScheme() + PATH_INPUT, igfsScheme() + PATH_OUTPUT);
+
+        runTaskWithInput(gridJob, HadoopTaskType.REDUCE, 0, "word1", "5", "word2", "10");
+        runTaskWithInput(gridJob, HadoopTaskType.REDUCE, 1, "word3", "7", "word4", "15");
+
+        assertEquals(
+            "word1\t5\n" +
+            "word2\t10\n",
+            readAndSortFile(PATH_OUTPUT + "/_temporary/0/task_00000000-0000-0000-0000-000000000000_0000_r_000000/" +
+                getOutputFileNamePrefix() + "00000")
+        );
+
+        assertEquals(
+            "word3\t7\n" +
+            "word4\t15\n",
+            readAndSortFile(PATH_OUTPUT + "/_temporary/0/task_00000000-0000-0000-0000-000000000000_0000_r_000001/" +
+                getOutputFileNamePrefix() + "00001")
+        );
+    }
+
+    /**
+     * Tests combine task execution.
+     *
+     * @throws Exception If fails.
+     */
+    public void testCombinerTask() throws Exception {
+        HadoopJob gridJob = getHadoopJob("/", "/");
+
+        HadoopTestTaskContext ctx =
+            runTaskWithInput(gridJob, HadoopTaskType.COMBINE, 0, "word1", "5", "word2", "10");
+
+        assertEquals("word1,5; word2,10", Joiner.on("; ").join(ctx.mockOutput()));
+
+        ctx = runTaskWithInput(gridJob, HadoopTaskType.COMBINE, 1, "word3", "7", "word4", "15");
+
+        assertEquals("word3,7; word4,15", Joiner.on("; ").join(ctx.mockOutput()));
+    }
+
+    /**
+     * Runs a chain of map and combine tasks on a file block.
+     *
+     * @param fileBlock Block of the input file to be processed.
+     * @param gridJob Hadoop job implementation.
+     * @return Context of combine task with mock output.
+     * @throws IgniteCheckedException If fails.
+     */
+    private HadoopTestTaskContext runMapCombineTask(HadoopFileBlock fileBlock, HadoopJob gridJob)
+        throws IgniteCheckedException {
+        HadoopTaskInfo taskInfo = new HadoopTaskInfo(HadoopTaskType.MAP, gridJob.id(), 0, 0, fileBlock);
+
+        HadoopTestTaskContext mapCtx = new HadoopTestTaskContext(taskInfo, gridJob);
+
+        mapCtx.run();
+
+        // Prepare input for the combiner.
+        taskInfo = new HadoopTaskInfo(HadoopTaskType.COMBINE, gridJob.id(), 0, 0, null);
+
+        HadoopTestTaskContext combineCtx = new HadoopTestTaskContext(taskInfo, gridJob);
+
+        combineCtx.makeTreeOfWritables(mapCtx.mockOutput());
+
+        combineCtx.run();
+
+        return combineCtx;
+    }
+
+    /**
+     * Tests the whole job pipeline:
+     * runs two chains of map-combine tasks and feeds the results into a single reduce task.
+     *
+     * @throws Exception If fails.
+     */
+    @SuppressWarnings("ConstantConditions")
+    public void testAllTasks() throws Exception {
+        IgfsPath inDir = new IgfsPath(PATH_INPUT);
+
+        igfs.mkdirs(inDir);
+
+        IgfsPath inFile = new IgfsPath(inDir, HadoopWordCount2.class.getSimpleName() + "-input");
+
+        URI inFileUri = URI.create(igfsScheme() + inFile.toString());
+
+        generateTestFile(inFile.toString(), "red", 100, "blue", 200, "green", 150, "yellow", 70);
+
+        // Split the file into two blocks.
+        long fileLen = igfs.info(inFile).length();
+
+        long l = fileLen / 2;
+
+        HadoopFileBlock fileBlock1 = new HadoopFileBlock(HOSTS, inFileUri, 0, l);
+        HadoopFileBlock fileBlock2 = new HadoopFileBlock(HOSTS, inFileUri, l, fileLen - l);
+
+        HadoopJob gridJob = getHadoopJob(inFileUri.toString(), igfsScheme() + PATH_OUTPUT);
+
+        HadoopTestTaskContext combine1Ctx = runMapCombineTask(fileBlock1, gridJob);
+
+        HadoopTestTaskContext combine2Ctx = runMapCombineTask(fileBlock2, gridJob);
+
+        // Prepare input for the reducer.
+        HadoopTaskInfo taskInfo = new HadoopTaskInfo(HadoopTaskType.REDUCE, gridJob.id(), 0, 0, null);
+
+        HadoopTestTaskContext reduceCtx = new HadoopTestTaskContext(taskInfo, gridJob);
+
+        reduceCtx.makeTreeOfWritables(combine1Ctx.mockOutput());
+        reduceCtx.makeTreeOfWritables(combine2Ctx.mockOutput());
+
+        reduceCtx.run();
+
+        reduceCtx.taskInfo(new HadoopTaskInfo(HadoopTaskType.COMMIT, gridJob.id(), 0, 0, null));
+
+        reduceCtx.run();
+
+        assertEquals(
+            "blue\t200\n" +
+            "green\t150\n" +
+            "red\t100\n" +
+            "yellow\t70\n",
+            readAndSortFile(PATH_OUTPUT + "/" + getOutputFileNamePrefix() + "00000")
+        );
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksV1Test.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksV1Test.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksV1Test.java
new file mode 100644
index 0000000..27d7fc2
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksV1Test.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.io.IOException;
+import java.util.UUID;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.ignite.internal.processors.hadoop.examples.HadoopWordCount1;
+import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2Job;
+
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.createJobInfo;
+
+/**
+ * Tests of Map, Combine and Reduce task executions by running a job through the Hadoop API v1.
+ */
+public class HadoopTasksV1Test extends HadoopTasksAllVersionsTest {
+    /**
+     * Creates WordCount hadoop job for API v1.
+     *
+     * @param inFile Input file name for the job.
+     * @param outFile Output file name for the job.
+     * @return Hadoop job.
+     * @throws IOException If fails.
+     */
+    @Override public HadoopJob getHadoopJob(String inFile, String outFile) throws Exception {
+        JobConf jobConf = HadoopWordCount1.getJob(inFile, outFile);
+
+        setupFileSystems(jobConf);
+
+        HadoopDefaultJobInfo jobInfo = createJobInfo(jobConf);
+
+        UUID uuid = new UUID(0, 0);
+
+        HadoopJobId jobId = new HadoopJobId(uuid, 0);
+
+        return jobInfo.createJob(HadoopV2Job.class, jobId, log, null);
+    }
+
+    /** {@inheritDoc} */
+    @Override public String getOutputFileNamePrefix() {
+        return "part-";
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksV2Test.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksV2Test.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksV2Test.java
new file mode 100644
index 0000000..30cf50c
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksV2Test.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.util.UUID;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.ignite.internal.processors.hadoop.examples.HadoopWordCount2;
+import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2Job;
+
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.createJobInfo;
+
+/**
+ * Tests of Map, Combine and Reduce task executions by running a job through the Hadoop API v2.
+ */
+public class HadoopTasksV2Test extends HadoopTasksAllVersionsTest {
+    /**
+     * Creates WordCount hadoop job for API v2.
+     *
+     * @param inFile Input file name for the job.
+     * @param outFile Output file name for the job.
+     * @return Hadoop job.
+     * @throws Exception If fails.
+     */
+    @Override public HadoopJob getHadoopJob(String inFile, String outFile) throws Exception {
+        Job job = Job.getInstance();
+
+        job.setOutputKeyClass(Text.class);
+        job.setOutputValueClass(IntWritable.class);
+
+        HadoopWordCount2.setTasksClasses(job, true, true, true, false);
+
+        Configuration conf = job.getConfiguration();
+
+        setupFileSystems(conf);
+
+        FileInputFormat.setInputPaths(job, new Path(inFile));
+        FileOutputFormat.setOutputPath(job, new Path(outFile));
+
+        job.setJarByClass(HadoopWordCount2.class);
+
+        HadoopDefaultJobInfo jobInfo = createJobInfo(job.getConfiguration());
+
+        UUID uuid = new UUID(0, 0);
+
+        HadoopJobId jobId = new HadoopJobId(uuid, 0);
+
+        return jobInfo.createJob(HadoopV2Job.class, jobId, log, null);
+    }
+
+    /** {@inheritDoc} */
+    @Override public String getOutputFileNamePrefix() {
+        return "part-r-";
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestRoundRobinMrPlanner.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestRoundRobinMrPlanner.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestRoundRobinMrPlanner.java
new file mode 100644
index 0000000..edafecd
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestRoundRobinMrPlanner.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.UUID;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.internal.processors.hadoop.planner.HadoopDefaultMapReducePlan;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Round-robin mr planner.
+ */
+public class HadoopTestRoundRobinMrPlanner implements HadoopMapReducePlanner {
+    /** {@inheritDoc} */
+    @Override public HadoopMapReducePlan preparePlan(HadoopJob job, Collection<ClusterNode> top,
+        @Nullable HadoopMapReducePlan oldPlan) throws IgniteCheckedException {
+        if (top.isEmpty())
+            throw new IllegalArgumentException("Topology is empty");
+
+        // Has at least one element.
+        Iterator<ClusterNode> it = top.iterator();
+
+        Map<UUID, Collection<HadoopInputSplit>> mappers = new HashMap<>();
+
+        for (HadoopInputSplit block : job.input()) {
+            ClusterNode node = it.next();
+
+            Collection<HadoopInputSplit> nodeBlocks = mappers.get(node.id());
+
+            if (nodeBlocks == null) {
+                nodeBlocks = new ArrayList<>();
+
+                mappers.put(node.id(), nodeBlocks);
+            }
+
+            nodeBlocks.add(block);
+
+            if (!it.hasNext())
+                it = top.iterator();
+        }
+
+        int[] rdc = new int[job.info().reducers()];
+
+        for (int i = 0; i < rdc.length; i++)
+            rdc[i] = i;
+
+        return new HadoopDefaultMapReducePlan(mappers, Collections.singletonMap(it.next().id(), rdc));
+    }
+}
\ No newline at end of file

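The planner above restarts the topology iterator to achieve round-robin assignment.
An equivalent, index-based formulation of the mapper assignment may read more
directly; a sketch under the same types (reducer placement omitted):

    List<ClusterNode> nodes = new ArrayList<>(top);

    Map<UUID, Collection<HadoopInputSplit>> mappers = new HashMap<>();

    int i = 0;

    for (HadoopInputSplit split : job.input()) {
        ClusterNode node = nodes.get(i++ % nodes.size());

        Collection<HadoopInputSplit> nodeBlocks = mappers.get(node.id());

        if (nodeBlocks == null)
            mappers.put(node.id(), nodeBlocks = new ArrayList<>());

        nodeBlocks.add(split);
    }
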

[13/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimapBase.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimapBase.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimapBase.java
deleted file mode 100644
index 39b7c51..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimapBase.java
+++ /dev/null
@@ -1,435 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.shuffle.collections;
-
-import java.io.DataInput;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.NoSuchElementException;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopSerialization;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.apache.ignite.internal.processors.hadoop.shuffle.streams.HadoopDataInStream;
-import org.apache.ignite.internal.processors.hadoop.shuffle.streams.HadoopDataOutStream;
-import org.apache.ignite.internal.processors.hadoop.shuffle.streams.HadoopOffheapBuffer;
-import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
-import org.jetbrains.annotations.Nullable;
-
-import static org.apache.ignite.internal.processors.hadoop.HadoopJobProperty.SHUFFLE_OFFHEAP_PAGE_SIZE;
-import static org.apache.ignite.internal.processors.hadoop.HadoopJobProperty.get;
-
-/**
- * Base class for all multimaps.
- */
-public abstract class HadoopMultimapBase implements HadoopMultimap {
-    /** */
-    protected final GridUnsafeMemory mem;
-
-    /** */
-    protected final int pageSize;
-
-    /** */
-    private final Collection<Page> allPages = new ConcurrentLinkedQueue<>();
-
-    /**
-     * @param jobInfo Job info.
-     * @param mem Memory.
-     */
-    protected HadoopMultimapBase(HadoopJobInfo jobInfo, GridUnsafeMemory mem) {
-        assert jobInfo != null;
-        assert mem != null;
-
-        this.mem = mem;
-
-        pageSize = get(jobInfo, SHUFFLE_OFFHEAP_PAGE_SIZE, 32 * 1024);
-    }
-
-    /**
-     * @param page Page.
-     */
-    private void deallocate(Page page) {
-        assert page != null;
-
-        mem.release(page.ptr, page.size);
-    }
-
-    /**
-     * @param valPtr Value page pointer.
-     * @param nextValPtr Next value page pointer.
-     */
-    protected void nextValue(long valPtr, long nextValPtr) {
-        mem.writeLong(valPtr, nextValPtr);
-    }
-
-    /**
-     * @param valPtr Value page pointer.
-     * @return Next value page pointer.
-     */
-    protected long nextValue(long valPtr) {
-        return mem.readLong(valPtr);
-    }
-
-    /**
-     * @param valPtr Value page pointer.
-     * @param size Size.
-     */
-    protected void valueSize(long valPtr, int size) {
-        mem.writeInt(valPtr + 8, size);
-    }
-
-    /**
-     * @param valPtr Value page pointer.
-     * @return Value size.
-     */
-    protected int valueSize(long valPtr) {
-        return mem.readInt(valPtr + 8);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void close() {
-        for (Page page : allPages)
-            deallocate(page);
-    }
-
-    /**
-     * Reader for key and value.
-     */
-    protected class ReaderBase implements AutoCloseable {
-        /** */
-        private Object tmp;
-
-        /** */
-        private final HadoopSerialization ser;
-
-        /** */
-        private final HadoopDataInStream in = new HadoopDataInStream(mem);
-
-        /**
-         * @param ser Serialization.
-         */
-        protected ReaderBase(HadoopSerialization ser) {
-            assert ser != null;
-
-            this.ser = ser;
-        }
-
-        /**
-         * @param valPtr Value page pointer.
-         * @return Value.
-         */
-        public Object readValue(long valPtr) {
-            assert valPtr > 0 : valPtr;
-
-            try {
-                return read(valPtr + 12, valueSize(valPtr));
-            }
-            catch (IgniteCheckedException e) {
-                throw new IgniteException(e);
-            }
-        }
-
-        /**
-         * Resets temporary object to the given one.
-         *
-         * @param tmp Temporary object for reuse.
-         */
-        public void resetReusedObject(Object tmp) {
-            this.tmp = tmp;
-        }
-
-        /**
-         * @param ptr Pointer.
-         * @param size Object size.
-         * @return Object.
-         */
-        protected Object read(long ptr, long size) throws IgniteCheckedException {
-            in.buffer().set(ptr, size);
-
-            tmp = ser.read(in, tmp);
-
-            return tmp;
-        }
-
-        /** {@inheritDoc} */
-        @Override public void close() throws IgniteCheckedException {
-            ser.close();
-        }
-    }
-
-    /**
-     * Base class for adders.
-     */
-    protected abstract class AdderBase implements Adder {
-        /** */
-        protected final HadoopSerialization keySer;
-
-        /** */
-        protected final HadoopSerialization valSer;
-
-        /** */
-        private final HadoopDataOutStream out;
-
-        /** */
-        private long writeStart;
-
-        /** Current page. */
-        private Page curPage;
-
-        /**
-         * @param ctx Task context.
-         * @throws IgniteCheckedException If failed.
-         */
-        protected AdderBase(HadoopTaskContext ctx) throws IgniteCheckedException {
-            valSer = ctx.valueSerialization();
-            keySer = ctx.keySerialization();
-
-            out = new HadoopDataOutStream(mem) {
-                @Override public long move(long size) {
-                    long ptr = super.move(size);
-
-                    if (ptr == 0) // Was not able to move - not enough free space.
-                        ptr = allocateNextPage(size);
-
-                    assert ptr != 0;
-
-                    return ptr;
-                }
-            };
-        }
-
-        /**
-         * @param requestedSize Requested size.
-         * @return Next write pointer.
-         */
-        private long allocateNextPage(long requestedSize) {
-            int writtenSize = writtenSize();
-
-            long newPageSize = nextPageSize(writtenSize + requestedSize);
-            long newPagePtr = mem.allocate(newPageSize);
-
-            HadoopOffheapBuffer b = out.buffer();
-
-            b.set(newPagePtr, newPageSize);
-
-            if (writtenSize != 0) {
-                mem.copyMemory(writeStart, newPagePtr, writtenSize);
-
-                b.move(writtenSize);
-            }
-
-            writeStart = newPagePtr;
-
-            // At this point old page is not needed, so we release it.
-            Page oldPage = curPage;
-
-            curPage = new Page(newPagePtr, newPageSize);
-
-            if (oldPage != null)
-                allPages.add(oldPage);
-
-            return b.move(requestedSize);
-        }
-
-        /**
-         * Get next page size.
-         *
-         * @param required Required amount of data.
-         * @return Next page size.
-         */
-        private long nextPageSize(long required) {
-            long pages = (required / pageSize) + 1;
-
-            long pagesPow2 = nextPowerOfTwo(pages);
-
-            return pagesPow2 * pageSize;
-        }
-
-        /**
-         * Get the next power of two greater than or equal to the given number. Naive implementation.
-         *
-         * @param val Number.
-         * @return Nearest pow2.
-         */
-        private long nextPowerOfTwo(long val) {
-            long res = 1;
-
-            while (res < val)
-                res = res << 1;
-
-            if (res < 0)
-                throw new IllegalArgumentException("Value is too big to find positive pow2: " + val);
-
-            return res;
-        }
-
-        /**
-         * @return Fixed pointer.
-         */
-        private long fixAlignment() {
-            HadoopOffheapBuffer b = out.buffer();
-
-            long ptr = b.pointer();
-
-            if ((ptr & 7L) != 0) { // Address is not aligned by octet.
-                ptr = (ptr + 8L) & ~7L;
-
-                b.pointer(ptr);
-            }
-
-            return ptr;
-        }
-
-        /**
-         * @param off Offset.
-         * @param o Object.
-         * @return Page pointer.
-         * @throws IgniteCheckedException If failed.
-         */
-        protected long write(int off, Object o, HadoopSerialization ser) throws IgniteCheckedException {
-            writeStart = fixAlignment();
-
-            if (off != 0)
-                out.move(off);
-
-            ser.write(out, o);
-
-            return writeStart;
-        }
-
-        /**
-         * @param size Size.
-         * @return Pointer.
-         */
-        protected long allocate(int size) {
-            writeStart = fixAlignment();
-
-            out.move(size);
-
-            return writeStart;
-        }
-
-        /**
-         * Rewinds local allocation pointer to the given pointer if possible.
-         *
-         * @param ptr Pointer.
-         */
-        protected void localDeallocate(long ptr) {
-            HadoopOffheapBuffer b = out.buffer();
-
-            if (b.isInside(ptr))
-                b.pointer(ptr);
-            else
-                b.reset();
-        }
-
-        /**
-         * @return Written size.
-         */
-        protected int writtenSize() {
-            return (int)(out.buffer().pointer() - writeStart);
-        }
-
-        /** {@inheritDoc} */
-        @Override public Key addKey(DataInput in, @Nullable Key reuse) throws IgniteCheckedException {
-            throw new UnsupportedOperationException();
-        }
-
-        /** {@inheritDoc} */
-        @Override public void close() throws IgniteCheckedException {
-            if (curPage != null)
-                allPages.add(curPage);
-
-            keySer.close();
-            valSer.close();
-        }
-    }
-
-    /**
-     * Iterator over values.
-     */
-    protected class ValueIterator implements Iterator<Object> {
-        /** */
-        private long valPtr;
-
-        /** */
-        private final ReaderBase valReader;
-
-        /**
-         * @param valPtr Value page pointer.
-         * @param valReader Value reader.
-         */
-        protected ValueIterator(long valPtr, ReaderBase valReader) {
-            this.valPtr = valPtr;
-            this.valReader = valReader;
-        }
-
-        /**
-         * @param valPtr Head value pointer.
-         */
-        public void head(long valPtr) {
-            this.valPtr = valPtr;
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean hasNext() {
-            return valPtr != 0;
-        }
-
-        /** {@inheritDoc} */
-        @Override public Object next() {
-            if (!hasNext())
-                throw new NoSuchElementException();
-
-            Object res = valReader.readValue(valPtr);
-
-            valPtr = nextValue(valPtr);
-
-            return res;
-        }
-
-        /** {@inheritDoc} */
-        @Override public void remove() {
-            throw new UnsupportedOperationException();
-        }
-    }
-
-    /**
-     * Page.
-     */
-    private static class Page {
-        /** Pointer. */
-        private final long ptr;
-
-        /** Size. */
-        private final long size;
-
-        /**
-         * Constructor.
-         *
-         * @param ptr Pointer.
-         * @param size Size.
-         */
-        public Page(long ptr, long size) {
-            this.ptr = ptr;
-            this.size = size;
-        }
-    }
-}
\ No newline at end of file

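Two numeric details in the removed AdderBase deserve a worked example:
allocateNextPage() grows to a power-of-two multiple of the page size that covers the
bytes already written plus the requested size, and fixAlignment() rounds write
pointers up to 8-byte alignment. With illustrative numbers (pageSize = 32768,
nextPowerOfTwo() as in the file):

    // nextPageSize: required = writtenSize + requestedSize, e.g. 50000 bytes.
    long pages = (50000 / 32768) + 1;                  // -> 2 pages needed.
    long newPageSize = nextPowerOfTwo(pages) * 32768;  // 2 is already a pow2 -> 65536.

    // fixAlignment: round an unaligned pointer up to the next multiple of 8.
    long ptr = 1001;

    if ((ptr & 7L) != 0)
        ptr = (ptr + 8L) & ~7L;                        // -> 1008.
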
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopSkipList.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopSkipList.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopSkipList.java
deleted file mode 100644
index 7db88bc..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopSkipList.java
+++ /dev/null
@@ -1,733 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.shuffle.collections;
-
-import java.io.DataInput;
-import java.util.Comparator;
-import java.util.Iterator;
-import java.util.Random;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopSerialization;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
-import org.apache.ignite.internal.util.GridLongList;
-import org.apache.ignite.internal.util.GridRandom;
-import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
-import org.apache.ignite.internal.util.typedef.internal.A;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Skip list.
- */
-public class HadoopSkipList extends HadoopMultimapBase {
-    /** */
-    private static final int HEADS_SIZE = 24 + 33 * 8; // Offset + max level is from 0 to 32 inclusive.
-
-    /** Top level. */
-    private final AtomicInteger topLevel = new AtomicInteger(-1);
-
-    /** Heads for all the lists. */
-    private final long heads;
-
-    /** */
-    private final AtomicBoolean visitGuard = new AtomicBoolean();
-
-    /**
-     * @param jobInfo Job info.
-     * @param mem Memory.
-     */
-    public HadoopSkipList(HadoopJobInfo jobInfo, GridUnsafeMemory mem) {
-        super(jobInfo, mem);
-
-        heads = mem.allocate(HEADS_SIZE, true);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void close() {
-        super.close();
-
-        mem.release(heads, HEADS_SIZE);
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean visit(boolean ignoreLastVisited, Visitor v) throws IgniteCheckedException {
-        if (!visitGuard.compareAndSet(false, true))
-            return false;
-
-        for (long meta = nextMeta(heads, 0); meta != 0L; meta = nextMeta(meta, 0)) {
-            long valPtr = value(meta);
-
-            long lastVisited = ignoreLastVisited ? 0 : lastVisitedValue(meta);
-
-            if (valPtr != lastVisited) {
-                long k = key(meta);
-
-                v.onKey(k + 4, keySize(k));
-
-                lastVisitedValue(meta, valPtr); // Set it to the first value in chain.
-
-                do {
-                    v.onValue(valPtr + 12, valueSize(valPtr));
-
-                    valPtr = nextValue(valPtr);
-                }
-                while (valPtr != lastVisited);
-            }
-        }
-
-        visitGuard.lazySet(false);
-
-        return true;
-    }
-
-    /** {@inheritDoc} */
-    @Override public Adder startAdding(HadoopTaskContext ctx) throws IgniteCheckedException {
-        return new AdderImpl(ctx);
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopTaskInput input(HadoopTaskContext taskCtx) throws IgniteCheckedException {
-        Input in = new Input(taskCtx);
-
-        Comparator<Object> grpCmp = taskCtx.groupComparator();
-
-        if (grpCmp != null)
-            return new GroupedInput(grpCmp, in);
-
-        return in;
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @return Key pointer.
-     */
-    private long key(long meta) {
-        return mem.readLong(meta);
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @param key Key pointer.
-     */
-    private void key(long meta, long key) {
-        mem.writeLong(meta, key);
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @return Value pointer.
-     */
-    private long value(long meta) {
-        return mem.readLongVolatile(meta + 8);
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @param valPtr Value pointer.
-     */
-    private void value(long meta, long valPtr) {
-        mem.writeLongVolatile(meta + 8, valPtr);
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @param oldValPtr Old first value pointer.
-     * @param newValPtr New first value pointer.
-     * @return {@code true} If operation succeeded.
-     */
-    private boolean casValue(long meta, long oldValPtr, long newValPtr) {
-        return mem.casLong(meta + 8, oldValPtr, newValPtr);
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @return Last visited value pointer.
-     */
-    private long lastVisitedValue(long meta) {
-        return mem.readLong(meta + 16);
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @param valPtr Last visited value pointer.
-     */
-    private void lastVisitedValue(long meta, long valPtr) {
-        mem.writeLong(meta + 16, valPtr);
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @param level Level.
-     * @return Next meta pointer.
-     */
-    private long nextMeta(long meta, int level) {
-        assert meta > 0 : meta;
-
-        return mem.readLongVolatile(meta + 24 + 8 * level);
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @param level Level.
-     * @param oldNext Old next meta pointer.
-     * @param newNext New next meta pointer.
-     * @return {@code true} If operation succeeded.
-     */
-    private boolean casNextMeta(long meta, int level, long oldNext, long newNext) {
-        assert meta > 0 : meta;
-
-        return mem.casLong(meta + 24 + 8 * level, oldNext, newNext);
-    }
-
-    /**
-     * @param meta Meta pointer.
-     * @param level Level.
-     * @param nextMeta Next meta.
-     */
-    private void nextMeta(long meta, int level, long nextMeta) {
-        assert meta != 0;
-
-        mem.writeLong(meta + 24 + 8 * level, nextMeta);
-    }
-
-    /**
-     * @param keyPtr Key pointer.
-     * @return Key size.
-     */
-    private int keySize(long keyPtr) {
-        return mem.readInt(keyPtr);
-    }
-
-    /**
-     * @param keyPtr Key pointer.
-     * @param keySize Key size.
-     */
-    private void keySize(long keyPtr, int keySize) {
-        mem.writeInt(keyPtr, keySize);
-    }
-
-    /**
-     * @param rnd Random.
-     * @return Next level.
-     */
-    public static int randomLevel(Random rnd) {
-        int x = rnd.nextInt();
-
-        int level = 0;
-
-        while ((x & 1) != 0) { // Count sequential 1 bits.
-            level++;
-
-            x >>>= 1;
-        }
-
-        return level;
-    }
-
-    /**
-     * Reader.
-     */
-    private class Reader extends ReaderBase {
-        /**
-         * @param ser Serialization.
-         */
-        protected Reader(HadoopSerialization ser) {
-            super(ser);
-        }
-
-        /**
-         * @param meta Meta pointer.
-         * @return Key.
-         */
-        public Object readKey(long meta) {
-            assert meta > 0 : meta;
-
-            long k = key(meta);
-
-            try {
-                return read(k + 4, keySize(k));
-            }
-            catch (IgniteCheckedException e) {
-                throw new IgniteException(e);
-            }
-        }
-    }
-
-    /**
-     * Adder.
-     */
-    private class AdderImpl extends AdderBase {
-        /** */
-        private final Comparator<Object> cmp;
-
-        /** */
-        private final Random rnd = new GridRandom();
-
-        /** */
-        private final GridLongList stack = new GridLongList(16);
-
-        /** */
-        private final Reader keyReader;
-
-        /**
-         * @param ctx Task context.
-         * @throws IgniteCheckedException If failed.
-         */
-        protected AdderImpl(HadoopTaskContext ctx) throws IgniteCheckedException {
-            super(ctx);
-
-            keyReader = new Reader(keySer);
-
-            cmp = ctx.sortComparator();
-        }
-
-        /** {@inheritDoc} */
-        @Override public void write(Object key, Object val) throws IgniteCheckedException {
-            A.notNull(val, "val");
-
-            add(key, val);
-        }
-
-        /** {@inheritDoc} */
-        @Override public Key addKey(DataInput in, @Nullable Key reuse) throws IgniteCheckedException {
-            KeyImpl k = reuse == null ? new KeyImpl() : (KeyImpl)reuse;
-
-            k.tmpKey = keySer.read(in, k.tmpKey);
-
-            k.meta = add(k.tmpKey, null);
-
-            return k;
-        }
-
-        /**
-         * @param key Key.
-         * @param val Value.
-         * @param level Level.
-         * @return Meta pointer.
-         */
-        private long createMeta(long key, long val, int level) {
-            int size = 32 + 8 * level;
-
-            long meta = allocate(size);
-
-            key(meta, key);
-            value(meta, val);
-            lastVisitedValue(meta, 0L);
-
-            for (int i = 32; i < size; i += 8) // Fill with 0.
-                mem.writeLong(meta + i, 0L);
-
-            return meta;
-        }
-
-        /**
-         * @param key Key.
-         * @return Pointer.
-         * @throws IgniteCheckedException If failed.
-         */
-        private long writeKey(Object key) throws IgniteCheckedException {
-            long keyPtr = write(4, key, keySer);
-            int keySize = writtenSize() - 4;
-
-            keySize(keyPtr, keySize);
-
-            return keyPtr;
-        }
-
-        /**
-         * @param prevMeta Previous meta.
-         * @param meta Next meta.
-         */
-        private void stackPush(long prevMeta, long meta) {
-            stack.add(prevMeta);
-            stack.add(meta);
-        }
-
-        /**
-         * Drops last remembered frame from the stack.
-         */
-        private void stackPop() {
-            stack.pop(2);
-        }
-
-        /**
-         * @param key Key.
-         * @param val Value.
-         * @return Meta pointer.
-         * @throws IgniteCheckedException If failed.
-         */
-        private long add(Object key, @Nullable Object val) throws IgniteCheckedException {
-            assert key != null;
-
-            stack.clear();
-
-            long valPtr = 0;
-
-            if (val != null) { // Write value.
-                valPtr = write(12, val, valSer);
-                int valSize = writtenSize() - 12;
-
-                nextValue(valPtr, 0);
-                valueSize(valPtr, valSize);
-            }
-
-            long keyPtr = 0;
-            long newMeta = 0;
-            int newMetaLevel = -1;
-
-            long prevMeta = heads;
-            int level = topLevel.get();
-            long meta = level < 0 ? 0 : nextMeta(heads, level);
-
-            for (;;) {
-                if (level < 0) { // We did not find our key, trying to add new meta.
-                    if (keyPtr == 0) { // Write key and create meta only once.
-                        keyPtr = writeKey(key);
-
-                        newMetaLevel = randomLevel(rnd);
-                        newMeta = createMeta(keyPtr, valPtr, newMetaLevel);
-                    }
-
-                    nextMeta(newMeta, 0, meta); // Set the next pointer of the new meta before publishing.
-
-                    if (casNextMeta(prevMeta, 0, meta, newMeta)) { // New key was added successfully.
-                        laceUp(key, newMeta, newMetaLevel);
-
-                        return newMeta;
-                    }
-                    else { // Add failed, so check what another thread added.
-                        meta = nextMeta(prevMeta, level = 0);
-
-                        stackPop();
-                    }
-                }
-
-                int cmpRes = cmp(key, meta);
-
-                if (cmpRes == 0) { // Key found.
-                    if (newMeta != 0)  // Deallocate if we've allocated something.
-                        localDeallocate(keyPtr);
-
-                    if (valPtr == 0) // Only key needs to be added.
-                        return meta;
-
-                    for (;;) { // Add value for the key found.
-                        long nextVal = value(meta);
-
-                        nextValue(valPtr, nextVal);
-
-                        if (casValue(meta, nextVal, valPtr))
-                            return meta;
-                    }
-                }
-
-                assert cmpRes != 0;
-
-                if (cmpRes > 0) { // Go right.
-                    prevMeta = meta;
-                    meta = nextMeta(meta, level);
-
-                    if (meta != 0) // Keep going right; if nothing is there, fall through and go down.
-                        continue;
-                }
-
-                while (--level >= 0) { // Go down.
-                    stackPush(prevMeta, meta); // Remember the path.
-
-                    long nextMeta = nextMeta(prevMeta, level);
-
-                    if (nextMeta != meta) { // If the meta differs from the upper level, search here; otherwise keep descending.
-                        meta = nextMeta;
-
-                        assert meta != 0;
-
-                        break;
-                    }
-                }
-            }
-        }
-
-        /**
-         * @param key Key.
-         * @param meta Meta pointer.
-         * @return Comparison result.
-         */
-        @SuppressWarnings("unchecked")
-        private int cmp(Object key, long meta) {
-            assert meta != 0;
-
-            return cmp.compare(key, keyReader.readKey(meta));
-        }
-
-        /**
-         * Adds appropriate index links between metas.
-         *
-         * @param key Key.
-         * @param newMeta Just added meta.
-         * @param newMetaLevel New level.
-         */
-        private void laceUp(Object key, long newMeta, int newMetaLevel) {
-            for (int level = 1; level <= newMetaLevel; level++) { // Go from the bottom up.
-                long prevMeta = heads;
-                long meta = 0;
-
-                if (!stack.isEmpty()) { // Get the path back.
-                    meta = stack.remove();
-                    prevMeta = stack.remove();
-                }
-
-                for (;;) {
-                    nextMeta(newMeta, level, meta);
-
-                    if (casNextMeta(prevMeta, level, meta, newMeta))
-                        break;
-
-                    long oldMeta = meta;
-
-                    meta = nextMeta(prevMeta, level); // Reread meta.
-
-                    for (;;) {
-                        int cmpRes = cmp(key, meta);
-
-                        if (cmpRes > 0) { // Go right.
-                            prevMeta = meta;
-                            meta = nextMeta(prevMeta, level);
-
-                            if (meta != oldMeta) // Old meta already known to be greater than ours or is 0.
-                                continue;
-                        }
-
-                        assert cmpRes != 0; // Two different metas with equal keys must be impossible.
-
-                        break; // Retry cas.
-                    }
-                }
-            }
-
-            if (!stack.isEmpty())
-                return; // Our level is already lower than the top.
-
-            for (;;) { // Raise top level.
-                int top = topLevel.get();
-
-                if (newMetaLevel <= top || topLevel.compareAndSet(top, newMetaLevel))
-                    break;
-            }
-        }
-
-        /**
-         * Key.
-         */
-        private class KeyImpl implements Key {
-            /** */
-            private long meta;
-
-            /** */
-            private Object tmpKey;
-
-            /**
-             * @return Meta pointer for the key.
-             */
-            public long address() {
-                return meta;
-            }
-
-            /**
-             * @param val Value.
-             */
-            @Override public void add(Value val) {
-                int size = val.size();
-
-                long valPtr = allocate(size + 12);
-
-                val.copyTo(valPtr + 12);
-
-                valueSize(valPtr, size);
-
-                long nextVal;
-
-                do {
-                    nextVal = value(meta);
-
-                    nextValue(valPtr, nextVal);
-                }
-                while (!casValue(meta, nextVal, valPtr));
-            }
-        }
-    }
-
-    /**
-     * Task input.
-     */
-    private class Input implements HadoopTaskInput {
-        /** */
-        private long metaPtr = heads;
-
-        /** */
-        private final Reader keyReader;
-
-        /** */
-        private final Reader valReader;
-
-        /**
-         * @param taskCtx Task context.
-         * @throws IgniteCheckedException If failed.
-         */
-        private Input(HadoopTaskContext taskCtx) throws IgniteCheckedException {
-            keyReader = new Reader(taskCtx.keySerialization());
-            valReader = new Reader(taskCtx.valueSerialization());
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean next() {
-            metaPtr = nextMeta(metaPtr, 0);
-
-            return metaPtr != 0;
-        }
-
-        /** {@inheritDoc} */
-        @Override public Object key() {
-            return keyReader.readKey(metaPtr);
-        }
-
-        /** {@inheritDoc} */
-        @Override public Iterator<?> values() {
-            return new ValueIterator(value(metaPtr), valReader);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void close() throws IgniteCheckedException {
-            keyReader.close();
-            valReader.close();
-        }
-    }
-
-    /**
-     * Grouped input using grouping comparator.
-     */
-    private class GroupedInput implements HadoopTaskInput {
-        /** */
-        private final Comparator<Object> grpCmp;
-
-        /** */
-        private final Input in;
-
-        /** */
-        private Object prevKey;
-
-        /** */
-        private Object nextKey;
-
-        /** */
-        private final GridLongList vals = new GridLongList();
-
-        /**
-         * @param grpCmp Grouping comparator.
-         * @param in Input.
-         */
-        private GroupedInput(Comparator<Object> grpCmp, Input in) {
-            this.grpCmp = grpCmp;
-            this.in = in;
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean next() {
-            if (prevKey == null) { // First call.
-                if (!in.next())
-                    return false;
-
-                prevKey = in.key();
-
-                assert prevKey != null;
-
-                in.keyReader.resetReusedObject(null); // We need two instances of the key object for comparison.
-
-                vals.add(value(in.metaPtr));
-            }
-            else {
-                if (in.metaPtr == 0) // We reached the end of the input.
-                    return false;
-
-                vals.clear();
-
-                vals.add(value(in.metaPtr));
-
-                in.keyReader.resetReusedObject(prevKey); // Switch key instances.
-
-                prevKey = nextKey;
-            }
-
-            while (in.next()) { // Collect head value pointers of entries with equal keys.
-                if (grpCmp.compare(prevKey, nextKey = in.key()) == 0)
-                    vals.add(value(in.metaPtr));
-                else
-                    break;
-            }
-
-            assert !vals.isEmpty();
-
-            return true;
-        }
-
-        /** {@inheritDoc} */
-        @Override public Object key() {
-            return prevKey;
-        }
-
-        /** {@inheritDoc} */
-        @Override public Iterator<?> values() {
-            assert !vals.isEmpty();
-
-            final ValueIterator valIter = new ValueIterator(vals.get(0), in.valReader);
-
-            return new Iterator<Object>() {
-                /** */
-                private int idx;
-
-                @Override public boolean hasNext() {
-                    if (!valIter.hasNext()) {
-                        if (++idx == vals.size())
-                            return false;
-
-                        valIter.head(vals.get(idx));
-
-                        assert valIter.hasNext();
-                    }
-
-                    return true;
-                }
-
-                @Override public Object next() {
-                    return valIter.next();
-                }
-
-                @Override public void remove() {
-                    valIter.remove();
-                }
-            };
-        }
-
-        /** {@inheritDoc} */
-        @Override public void close() throws IgniteCheckedException {
-            in.close();
-        }
-    }
-}
\ No newline at end of file

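For reference, the adder above performs a lock-free skip list insert: a new meta is published at level 0 with a CAS, after which laceUp() links it into the higher index levels recorded on the descent stack. The randomLevel() helper sits outside this hunk; a typical geometric generator, given here only as an illustrative sketch (not the removed implementation), draws level i with probability 2^-(i+1):

    import java.util.Random;

    /** Illustrative geometric level generator for a skip list. */
    final class SkipListLevels {
        /** @return Level in range [0, 30]; level i is drawn with probability 2^-(i+1). */
        static int randomLevel(Random rnd) {
            int level = 0;

            // Each "heads" adds one index level; the cap keeps meta blocks small.
            while (rnd.nextBoolean() && level < 30)
                level++;

            return level;
        }
    }
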
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataInStream.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataInStream.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataInStream.java
deleted file mode 100644
index 3b5fa15..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataInStream.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.shuffle.streams;
-
-import java.io.DataInput;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.charset.StandardCharsets;
-import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
-
-/**
- * Data input stream.
- */
-public class HadoopDataInStream extends InputStream implements DataInput {
-    /** */
-    private final HadoopOffheapBuffer buf = new HadoopOffheapBuffer(0, 0);
-
-    /** */
-    private final GridUnsafeMemory mem;
-
-    /**
-     * @param mem Memory.
-     */
-    public HadoopDataInStream(GridUnsafeMemory mem) {
-        assert mem != null;
-
-        this.mem = mem;
-    }
-
-    /**
-     * @return Buffer.
-     */
-    public HadoopOffheapBuffer buffer() {
-        return buf;
-    }
-
-    /**
-     * @param size Size.
-     * @return Old pointer.
-     */
-    protected long move(long size) throws IOException {
-        long ptr = buf.move(size);
-
-        assert ptr != 0;
-
-        return ptr;
-    }
-
-    /** {@inheritDoc} */
-    @Override public int read() throws IOException {
-        return readUnsignedByte();
-    }
-
-    /** {@inheritDoc} */
-    @Override public int read(byte[] b, int off, int len) throws IOException {
-        readFully(b, off, len);
-
-        return len;
-    }
-
-    /** {@inheritDoc} */
-    @Override public long skip(long n) throws IOException {
-        move(n);
-
-        return n;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readFully(byte[] b) throws IOException {
-        readFully(b, 0, b.length);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readFully(byte[] b, int off, int len) throws IOException {
-        mem.readBytes(move(len), b, off, len);
-    }
-
-    /** {@inheritDoc} */
-    @Override public int skipBytes(int n) throws IOException {
-        move(n);
-
-        return n;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean readBoolean() throws IOException {
-        byte res = readByte();
-
-        if (res == 1)
-            return true;
-
-        assert res == 0 : res;
-
-        return false;
-    }
-
-    /** {@inheritDoc} */
-    @Override public byte readByte() throws IOException {
-        return mem.readByte(move(1));
-    }
-
-    /** {@inheritDoc} */
-    @Override public int readUnsignedByte() throws IOException {
-        return readByte() & 0xff;
-    }
-
-    /** {@inheritDoc} */
-    @Override public short readShort() throws IOException {
-        return mem.readShort(move(2));
-    }
-
-    /** {@inheritDoc} */
-    @Override public int readUnsignedShort() throws IOException {
-        return readShort() & 0xffff;
-    }
-
-    /** {@inheritDoc} */
-    @Override public char readChar() throws IOException {
-        return (char)readShort();
-    }
-
-    /** {@inheritDoc} */
-    @Override public int readInt() throws IOException {
-        return mem.readInt(move(4));
-    }
-
-    /** {@inheritDoc} */
-    @Override public long readLong() throws IOException {
-        return mem.readLong(move(8));
-    }
-
-    /** {@inheritDoc} */
-    @Override public float readFloat() throws IOException {
-        return mem.readFloat(move(4));
-    }
-
-    /** {@inheritDoc} */
-    @Override public double readDouble() throws IOException {
-        return mem.readDouble(move(8));
-    }
-
-    /** {@inheritDoc} */
-    @Override public String readLine() throws IOException {
-        throw new UnsupportedOperationException();
-    }
-
-    /** {@inheritDoc} */
-    @Override public String readUTF() throws IOException {
-        byte[] bytes = new byte[readInt()];
-
-        if (bytes.length != 0)
-            readFully(bytes);
-
-        return new String(bytes, StandardCharsets.UTF_8);
-    }
-}
\ No newline at end of file

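Unlike a general-purpose InputStream, the stream above never signals end of stream: read() delegates to readUnsignedByte(), read(byte[], int, int) always reports len, and move() asserts that the buffer window is not exhausted. Callers are expected to size every read from metadata they already hold, as in this illustrative fragment (mem, valPtr and valSize are placeholders):

    HadoopDataInStream in = new HadoopDataInStream(mem); // mem is the shared GridUnsafeMemory.

    in.buffer().set(valPtr, valSize); // Window over one serialized value; valPtr (long) and valSize (int) come from the value header.

    byte[] payload = new byte[valSize];

    in.readFully(payload); // Reads exactly valSize bytes; there is no -1 sentinel.
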
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataOutStream.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataOutStream.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataOutStream.java
deleted file mode 100644
index f7b1a73..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataOutStream.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.shuffle.streams;
-
-import java.io.DataOutput;
-import java.io.OutputStream;
-import java.nio.charset.StandardCharsets;
-import org.apache.ignite.internal.util.GridUnsafe;
-import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
-
-/**
- * Data output stream.
- */
-public class HadoopDataOutStream extends OutputStream implements DataOutput {
-    /** */
-    private final HadoopOffheapBuffer buf = new HadoopOffheapBuffer(0, 0);
-
-    /** */
-    private final GridUnsafeMemory mem;
-
-    /**
-     * @param mem Memory.
-     */
-    public HadoopDataOutStream(GridUnsafeMemory mem) {
-        this.mem = mem;
-    }
-
-    /**
-     * @return Buffer.
-     */
-    public HadoopOffheapBuffer buffer() {
-        return buf;
-    }
-
-    /**
-     * @param size Size.
-     * @return Old pointer or {@code 0} if move was impossible.
-     */
-    public long move(long size) {
-        return buf.move(size);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void write(int b) {
-        writeByte(b);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void write(byte[] b) {
-        write(b, 0, b.length);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void write(byte[] b, int off, int len) {
-        GridUnsafe.copyMemory(b, GridUnsafe.BYTE_ARR_OFF + off, null, move(len), len);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeBoolean(boolean v) {
-        writeByte(v ? 1 : 0);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeByte(int v) {
-        mem.writeByte(move(1), (byte)v);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeShort(int v) {
-        mem.writeShort(move(2), (short)v);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeChar(int v) {
-        writeShort(v);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeInt(int v) {
-        mem.writeInt(move(4), v);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeLong(long v) {
-        mem.writeLong(move(8), v);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeFloat(float v) {
-        mem.writeFloat(move(4), v);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeDouble(double v) {
-        mem.writeDouble(move(8), v);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeBytes(String s) {
-        writeUTF(s);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeChars(String s) {
-        writeUTF(s);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeUTF(String s) {
-        byte[] b = s.getBytes(StandardCharsets.UTF_8);
-
-        writeInt(b.length);
-        write(b);
-    }
-}
\ No newline at end of file

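One compatibility note: writeUTF() above stores a 4-byte length followed by raw UTF-8 bytes rather than the modified UTF-8 of java.io.DataOutputStream, so it can only be decoded by HadoopDataInStream.readUTF(). A round-trip sketch, assuming GridUnsafeMemory.allocate()/release() behave as elsewhere in Ignite internals:

    GridUnsafeMemory mem = new GridUnsafeMemory(0);

    long ptr = mem.allocate(64);

    try {
        HadoopDataOutStream out = new HadoopDataOutStream(mem);

        out.buffer().set(ptr, 64);

        out.writeUTF("key"); // 4-byte length + raw UTF-8.

        HadoopDataInStream in = new HadoopDataInStream(mem);

        in.buffer().set(ptr, 64);

        assert "key".equals(in.readUTF());
    }
    finally {
        mem.release(ptr, 64);
    }
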
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopOffheapBuffer.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopOffheapBuffer.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopOffheapBuffer.java
deleted file mode 100644
index acc9be6..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopOffheapBuffer.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.shuffle.streams;
-
-/**
- * Offheap buffer.
- */
-public class HadoopOffheapBuffer {
-    /** Buffer begin address. */
-    private long bufPtr;
-
-    /** The first address we do not own. */
-    private long bufEnd;
-
-    /** Current read or write pointer. */
-    private long posPtr;
-
-    /**
-     * @param bufPtr Pointer to buffer begin.
-     * @param bufSize Size of the buffer.
-     */
-    public HadoopOffheapBuffer(long bufPtr, long bufSize) {
-        set(bufPtr, bufSize);
-    }
-
-    /**
-     * @param bufPtr Pointer to buffer begin.
-     * @param bufSize Size of the buffer.
-     */
-    public void set(long bufPtr, long bufSize) {
-        this.bufPtr = bufPtr;
-
-        posPtr = bufPtr;
-        bufEnd = bufPtr + bufSize;
-    }
-
-    /**
-     * @return Pointer to internal buffer begin.
-     */
-    public long begin() {
-        return bufPtr;
-    }
-
-    /**
-     * @return Buffer capacity.
-     */
-    public long capacity() {
-        return bufEnd - bufPtr;
-    }
-
-    /**
-     * @return Remaining capacity.
-     */
-    public long remaining() {
-        return bufEnd - posPtr;
-    }
-
-    /**
-     * @return Absolute pointer to the current position inside of the buffer.
-     */
-    public long pointer() {
-        return posPtr;
-    }
-
-    /**
-     * @param ptr Absolute pointer to the current position inside of the buffer.
-     */
-    public void pointer(long ptr) {
-        assert ptr >= bufPtr : bufPtr + " <= " + ptr;
-        assert ptr <= bufEnd : ptr + " <= " + bufEnd;
-
-        posPtr = ptr;
-    }
-
-    /**
-     * @param size Size to move the position by.
-     * @return Old position pointer or {@code 0} if move goes beyond the end of the buffer.
-     */
-    public long move(long size) {
-        assert size > 0 : size;
-
-        long oldPos = posPtr;
-        long newPos = oldPos + size;
-
-        if (newPos > bufEnd)
-            return 0;
-
-        posPtr = newPos;
-
-        return oldPos;
-    }
-
-    /**
-     * @param ptr Pointer.
-     * @return {@code true} If the given pointer is inside this buffer.
-     */
-    public boolean isInside(long ptr) {
-        return ptr >= bufPtr && ptr <= bufEnd;
-    }
-
-    /**
-     * Resets position to the beginning of buffer.
-     */
-    public void reset() {
-        posPtr = bufPtr;
-    }
-}
\ No newline at end of file

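The buffer is a plain pointer-bump window over off-heap memory: move() returns the old position and advances it, or returns 0 and leaves the position untouched when the requested size does not fit. A small arithmetic sketch (the base address is illustrative):

    HadoopOffheapBuffer buf = new HadoopOffheapBuffer(1024, 16); // begin = 1024, capacity = 16.

    assert buf.move(8) == 1024;  // The old position is returned.
    assert buf.remaining() == 8; // Half of the window is left.
    assert buf.move(16) == 0;    // Does not fit: position is unchanged.

    buf.reset();                 // Rewind to the beginning of the buffer.

    assert buf.pointer() == 1024;
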
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopEmbeddedTaskExecutor.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopEmbeddedTaskExecutor.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopEmbeddedTaskExecutor.java
deleted file mode 100644
index 5ede18e..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopEmbeddedTaskExecutor.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor;
-
-import java.util.Collection;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopJob;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobPhase;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskOutput;
-import org.apache.ignite.internal.processors.hadoop.jobtracker.HadoopJobMetadata;
-import org.apache.ignite.internal.processors.hadoop.jobtracker.HadoopJobTracker;
-import org.apache.ignite.internal.util.GridConcurrentHashSet;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-
-/**
- * Task executor.
- */
-public class HadoopEmbeddedTaskExecutor extends HadoopTaskExecutorAdapter {
-    /** Job tracker. */
-    private HadoopJobTracker jobTracker;
-
-    /** */
-    private final ConcurrentMap<HadoopJobId, Collection<HadoopRunnableTask>> jobs = new ConcurrentHashMap<>();
-
-    /** Executor service to run tasks. */
-    private HadoopExecutorService exec;
-
-    /** {@inheritDoc} */
-    @Override public void onKernalStart() throws IgniteCheckedException {
-        super.onKernalStart();
-
-        jobTracker = ctx.jobTracker();
-
-        exec = new HadoopExecutorService(log, ctx.kernalContext().gridName(),
-            ctx.configuration().getMaxParallelTasks(), ctx.configuration().getMaxTaskQueueSize());
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onKernalStop(boolean cancel) {
-        if (exec != null) {
-            exec.shutdown(3000);
-
-            if (cancel) {
-                for (HadoopJobId jobId : jobs.keySet())
-                    cancelTasks(jobId);
-            }
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void stop(boolean cancel) {
-        if (exec != null && !exec.shutdown(30000))
-            U.warn(log, "Failed to finish running tasks in 30 sec.");
-    }
-
-    /** {@inheritDoc} */
-    @Override public void run(final HadoopJob job, Collection<HadoopTaskInfo> tasks) throws IgniteCheckedException {
-        if (log.isDebugEnabled())
-            log.debug("Submitting tasks for local execution [locNodeId=" + ctx.localNodeId() +
-                ", tasksCnt=" + tasks.size() + ']');
-
-        Collection<HadoopRunnableTask> executedTasks = jobs.get(job.id());
-
-        if (executedTasks == null) {
-            executedTasks = new GridConcurrentHashSet<>();
-
-            Collection<HadoopRunnableTask> extractedCol = jobs.put(job.id(), executedTasks);
-
-            assert extractedCol == null;
-        }
-
-        final Collection<HadoopRunnableTask> finalExecutedTasks = executedTasks;
-
-        for (final HadoopTaskInfo info : tasks) {
-            assert info != null;
-
-            HadoopRunnableTask task = new HadoopRunnableTask(log, job, ctx.shuffle().memory(), info,
-                ctx.localNodeId()) {
-                @Override protected void onTaskFinished(HadoopTaskStatus status) {
-                    if (log.isDebugEnabled())
-                        log.debug("Finished task execution [jobId=" + job.id() + ", taskInfo=" + info + ", " +
-                            "waitTime=" + waitTime() + ", execTime=" + executionTime() + ']');
-
-                    finalExecutedTasks.remove(this);
-
-                    jobTracker.onTaskFinished(info, status);
-                }
-
-                @Override protected HadoopTaskInput createInput(HadoopTaskContext taskCtx) throws IgniteCheckedException {
-                    return ctx.shuffle().input(taskCtx);
-                }
-
-                @Override protected HadoopTaskOutput createOutput(HadoopTaskContext taskCtx) throws IgniteCheckedException {
-                    return ctx.shuffle().output(taskCtx);
-                }
-            };
-
-            executedTasks.add(task);
-
-            exec.submit(task);
-        }
-    }
-
-    /**
-     * Cancels all currently running tasks for given job ID and cancels scheduled execution of tasks
-     * for this job ID.
-     * <p>
-     * It is guaranteed that this method will not be called concurrently with
-     * {@link #run(org.apache.ignite.internal.processors.hadoop.HadoopJob, Collection)} method. No more job submissions will be performed via
-     * {@link #run(org.apache.ignite.internal.processors.hadoop.HadoopJob, Collection)} method for given job ID after this method is called.
-     *
-     * @param jobId Job ID to cancel.
-     */
-    @Override public void cancelTasks(HadoopJobId jobId) {
-        Collection<HadoopRunnableTask> executedTasks = jobs.get(jobId);
-
-        if (executedTasks != null) {
-            for (HadoopRunnableTask task : executedTasks)
-                task.cancel();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onJobStateChanged(HadoopJobMetadata meta) throws IgniteCheckedException {
-        if (meta.phase() == HadoopJobPhase.PHASE_COMPLETE) {
-            Collection<HadoopRunnableTask> executedTasks = jobs.remove(meta.jobId());
-
-            assert executedTasks == null || executedTasks.isEmpty();
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopExecutorService.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopExecutorService.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopExecutorService.java
deleted file mode 100644
index 993ecc9..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopExecutorService.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor;
-
-
-import java.util.Collection;
-import java.util.concurrent.Callable;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.ignite.IgniteLogger;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-import org.apache.ignite.internal.util.worker.GridWorker;
-import org.apache.ignite.internal.util.worker.GridWorkerListener;
-import org.apache.ignite.internal.util.worker.GridWorkerListenerAdapter;
-import org.apache.ignite.thread.IgniteThread;
-import org.jsr166.ConcurrentHashMap8;
-
-import static java.util.Collections.newSetFromMap;
-
-/**
- * Executor service without thread pooling.
- */
-public class HadoopExecutorService {
-    /** */
-    private final LinkedBlockingQueue<Callable<?>> queue;
-
-    /** */
-    private final Collection<GridWorker> workers = newSetFromMap(new ConcurrentHashMap8<GridWorker, Boolean>());
-
-    /** */
-    private final AtomicInteger active = new AtomicInteger();
-
-    /** */
-    private final int maxTasks;
-
-    /** */
-    private final String gridName;
-
-    /** */
-    private final IgniteLogger log;
-
-    /** */
-    private volatile boolean shutdown;
-
-    /** */
-    private final GridWorkerListener lsnr = new GridWorkerListenerAdapter() {
-            @Override public void onStopped(GridWorker w) {
-                workers.remove(w);
-
-                if (shutdown) {
-                    active.decrementAndGet();
-
-                    return;
-                }
-
-                Callable<?> task = queue.poll();
-
-                if (task != null)
-                    startThread(task);
-                else {
-                    active.decrementAndGet();
-
-                    if (!queue.isEmpty())
-                        startFromQueue();
-                }
-            }
-        };
-
-    /**
-     * @param log Logger.
-     * @param gridName Grid name.
-     * @param maxTasks Max number of tasks.
-     * @param maxQueue Max queue length.
-     */
-    public HadoopExecutorService(IgniteLogger log, String gridName, int maxTasks, int maxQueue) {
-        assert maxTasks > 0 : maxTasks;
-        assert maxQueue > 0 : maxQueue;
-
-        this.maxTasks = maxTasks;
-        this.queue = new LinkedBlockingQueue<>(maxQueue);
-        this.gridName = gridName;
-        this.log = log.getLogger(HadoopExecutorService.class);
-    }
-
-    /**
-     * @return Number of active workers.
-     */
-    public int active() {
-        return workers.size();
-    }
-
-    /**
-     * Submit task.
-     *
-     * @param task Task.
-     */
-    public void submit(Callable<?> task) {
-        while (queue.isEmpty()) {
-            int active0 = active.get();
-
-            if (active0 == maxTasks)
-                break;
-
-            if (active.compareAndSet(active0, active0 + 1)) {
-                startThread(task);
-
-                return; // Started in new thread bypassing queue.
-            }
-        }
-
-        try {
-            while (!queue.offer(task, 100, TimeUnit.MILLISECONDS)) {
-                if (shutdown)
-                    return; // Rejected due to shutdown.
-            }
-        }
-        catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-
-            return;
-        }
-
-        startFromQueue();
-    }
-
-    /**
-     * Attempts to start task from queue.
-     */
-    private void startFromQueue() {
-        do {
-            int active0 = active.get();
-
-            if (active0 == maxTasks)
-                break;
-
-            if (active.compareAndSet(active0, active0 + 1)) {
-                Callable<?> task = queue.poll();
-
-                if (task == null) {
-                    int res = active.decrementAndGet();
-
-                    assert res >= 0 : res;
-
-                    break;
-                }
-
-                startThread(task);
-            }
-        }
-        while (!queue.isEmpty());
-    }
-
-    /**
-     * @param task Task.
-     */
-    private void startThread(final Callable<?> task) {
-        String workerName;
-
-        if (task instanceof HadoopRunnableTask) {
-            final HadoopTaskInfo i = ((HadoopRunnableTask)task).taskInfo();
-
-            workerName = "Hadoop-task-" + i.jobId() + "-" + i.type() + "-" + i.taskNumber() + "-" + i.attempt();
-        }
-        else
-            workerName = task.toString();
-
-        GridWorker w = new GridWorker(gridName, workerName, log, lsnr) {
-            @Override protected void body() {
-                try {
-                    task.call();
-                }
-                catch (Exception e) {
-                    log.error("Failed to execute task: " + task, e);
-                }
-            }
-        };
-
-        workers.add(w);
-
-        if (shutdown)
-            w.cancel();
-
-        new IgniteThread(w).start();
-    }
-
-    /**
-     * Shuts down this executor service.
-     *
-     * @param awaitTimeMillis Time in milliseconds to wait for task completion.
-     * @return {@code true} If all tasks completed.
-     */
-    public boolean shutdown(long awaitTimeMillis) {
-        shutdown = true;
-
-        for (GridWorker w : workers)
-            w.cancel();
-
-        while (awaitTimeMillis > 0 && !workers.isEmpty()) {
-            try {
-                Thread.sleep(100);
-
-                awaitTimeMillis -= 100;
-            }
-            catch (InterruptedException e) {
-                break;
-            }
-        }
-
-        return workers.isEmpty();
-    }
-
-    /**
-     * @return {@code true} If method {@linkplain #shutdown(long)} was already called.
-     */
-    public boolean isShutdown() {
-        return shutdown;
-    }
-}
\ No newline at end of file

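To summarize the contract above: submit() starts the task in a dedicated thread while the queue is empty and fewer than maxTasks workers are active; otherwise it offers the task to the bounded queue in 100 ms steps, and a finishing worker picks up the next queued task in onStopped(). A hedged usage sketch (log stands for an IgniteLogger supplied by the surrounding component):

    HadoopExecutorService exec = new HadoopExecutorService(log, "grid", 4, 1024);

    exec.submit(new Callable<Void>() {
        @Override public Void call() {
            // The task body runs in its own IgniteThread; threads are not pooled.
            return null;
        }
    });

    // The same grace period onKernalStop() uses above.
    boolean allDone = exec.shutdown(3000);
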
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopRunnableTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopRunnableTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopRunnableTask.java
deleted file mode 100644
index a57efe6..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopRunnableTask.java
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor;
-
-import java.util.UUID;
-import java.util.concurrent.Callable;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteLogger;
-import org.apache.ignite.internal.processors.hadoop.HadoopJob;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskOutput;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopPerformanceCounter;
-import org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopHashMultimap;
-import org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopMultimap;
-import org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopSkipList;
-import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-import static org.apache.ignite.internal.processors.hadoop.HadoopJobProperty.COMBINER_HASHMAP_SIZE;
-import static org.apache.ignite.internal.processors.hadoop.HadoopJobProperty.SHUFFLE_COMBINER_NO_SORTING;
-import static org.apache.ignite.internal.processors.hadoop.HadoopJobProperty.get;
-import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.COMBINE;
-import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.MAP;
-
-/**
- * Runnable task.
- */
-public abstract class HadoopRunnableTask implements Callable<Void> {
-    /** */
-    private final GridUnsafeMemory mem;
-
-    /** */
-    private final IgniteLogger log;
-
-    /** */
-    private final HadoopJob job;
-
-    /** Task to run. */
-    private final HadoopTaskInfo info;
-
-    /** Submit time. */
-    private final long submitTs = U.currentTimeMillis();
-
-    /** Execution start timestamp. */
-    private long execStartTs;
-
-    /** Execution end timestamp. */
-    private long execEndTs;
-
-    /** */
-    private HadoopMultimap combinerInput;
-
-    /** */
-    private volatile HadoopTaskContext ctx;
-
-    /** Set if the task is being cancelled. */
-    private volatile boolean cancelled;
-
-    /** Node id. */
-    private UUID nodeId;
-
-    /**
-     * @param log Log.
-     * @param job Job.
-     * @param mem Memory.
-     * @param info Task info.
-     * @param nodeId Node id.
-     */
-    protected HadoopRunnableTask(IgniteLogger log, HadoopJob job, GridUnsafeMemory mem, HadoopTaskInfo info,
-        UUID nodeId) {
-        this.nodeId = nodeId;
-        this.log = log.getLogger(HadoopRunnableTask.class);
-        this.job = job;
-        this.mem = mem;
-        this.info = info;
-    }
-
-    /**
-     * @return Wait time.
-     */
-    public long waitTime() {
-        return execStartTs - submitTs;
-    }
-
-    /**
-     * @return Execution time.
-     */
-    public long executionTime() {
-        return execEndTs - execStartTs;
-    }
-
-    /** {@inheritDoc} */
-    @Override public Void call() throws IgniteCheckedException {
-        ctx = job.getTaskContext(info);
-
-        return ctx.runAsJobOwner(new Callable<Void>() {
-            @Override public Void call() throws Exception {
-                call0();
-
-                return null;
-            }
-        });
-    }
-
-    /**
-     * Implements actual task running.
-     *
-     * @throws IgniteCheckedException If failed.
-     */
-    void call0() throws IgniteCheckedException {
-        execStartTs = U.currentTimeMillis();
-
-        Throwable err = null;
-
-        HadoopTaskState state = HadoopTaskState.COMPLETED;
-
-        HadoopPerformanceCounter perfCntr = null;
-
-        try {
-            perfCntr = HadoopPerformanceCounter.getCounter(ctx.counters(), nodeId);
-
-            perfCntr.onTaskSubmit(info, submitTs);
-            perfCntr.onTaskPrepare(info, execStartTs);
-
-            ctx.prepareTaskEnvironment();
-
-            runTask(perfCntr);
-
-            if (info.type() == MAP && job.info().hasCombiner()) {
-                ctx.taskInfo(new HadoopTaskInfo(COMBINE, info.jobId(), info.taskNumber(), info.attempt(), null));
-
-                try {
-                    runTask(perfCntr);
-                }
-                finally {
-                    ctx.taskInfo(info);
-                }
-            }
-        }
-        catch (HadoopTaskCancelledException ignored) {
-            state = HadoopTaskState.CANCELED;
-        }
-        catch (Throwable e) {
-            state = HadoopTaskState.FAILED;
-            err = e;
-
-            U.error(log, "Task execution failed.", e);
-
-            if (e instanceof Error)
-                throw e;
-        }
-        finally {
-            execEndTs = U.currentTimeMillis();
-
-            if (perfCntr != null)
-                perfCntr.onTaskFinish(info, execEndTs);
-
-            onTaskFinished(new HadoopTaskStatus(state, err, ctx == null ? null : ctx.counters()));
-
-            if (combinerInput != null)
-                combinerInput.close();
-
-            if (ctx != null)
-                ctx.cleanupTaskEnvironment();
-        }
-    }
-
-    /**
-     * @param perfCntr Performance counter.
-     * @throws IgniteCheckedException If failed.
-     */
-    private void runTask(HadoopPerformanceCounter perfCntr) throws IgniteCheckedException {
-        if (cancelled)
-            throw new HadoopTaskCancelledException("Task cancelled.");
-
-        try (HadoopTaskOutput out = createOutputInternal(ctx);
-             HadoopTaskInput in = createInputInternal(ctx)) {
-
-            ctx.input(in);
-            ctx.output(out);
-
-            perfCntr.onTaskStart(ctx.taskInfo(), U.currentTimeMillis());
-
-            ctx.run();
-        }
-    }
-
-    /**
-     * Cancel the executed task.
-     */
-    public void cancel() {
-        cancelled = true;
-
-        if (ctx != null)
-            ctx.cancel();
-    }
-
-    /**
-     * @param status Task status.
-     */
-    protected abstract void onTaskFinished(HadoopTaskStatus status);
-
-    /**
-     * @param ctx Task context.
-     * @return Task input.
-     * @throws IgniteCheckedException If failed.
-     */
-    @SuppressWarnings("unchecked")
-    private HadoopTaskInput createInputInternal(HadoopTaskContext ctx) throws IgniteCheckedException {
-        switch (ctx.taskInfo().type()) {
-            case SETUP:
-            case MAP:
-            case COMMIT:
-            case ABORT:
-                return null;
-
-            case COMBINE:
-                assert combinerInput != null;
-
-                return combinerInput.input(ctx);
-
-            default:
-                return createInput(ctx);
-        }
-    }
-
-    /**
-     * @param ctx Task context.
-     * @return Input.
-     * @throws IgniteCheckedException If failed.
-     */
-    protected abstract HadoopTaskInput createInput(HadoopTaskContext ctx) throws IgniteCheckedException;
-
-    /**
-     * @param ctx Task context.
-     * @return Output.
-     * @throws IgniteCheckedException If failed.
-     */
-    protected abstract HadoopTaskOutput createOutput(HadoopTaskContext ctx) throws IgniteCheckedException;
-
-    /**
-     * @param ctx Task context.
-     * @return Task output.
-     * @throws IgniteCheckedException If failed.
-     */
-    private HadoopTaskOutput createOutputInternal(HadoopTaskContext ctx) throws IgniteCheckedException {
-        switch (ctx.taskInfo().type()) {
-            case SETUP:
-            case REDUCE:
-            case COMMIT:
-            case ABORT:
-                return null;
-
-            case MAP:
-                if (job.info().hasCombiner()) {
-                    assert combinerInput == null;
-
-                    combinerInput = get(job.info(), SHUFFLE_COMBINER_NO_SORTING, false) ?
-                        new HadoopHashMultimap(job.info(), mem, get(job.info(), COMBINER_HASHMAP_SIZE, 8 * 1024)) :
-                        new HadoopSkipList(job.info(), mem); // TODO replace with red-black tree
-
-                    return combinerInput.startAdding(ctx);
-                }
-
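-                // No combiner configured: fall through to the regular map output.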
-            default:
-                return createOutput(ctx);
-        }
-    }
-
-    /**
-     * @return Task info.
-     */
-    public HadoopTaskInfo taskInfo() {
-        return info;
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskExecutorAdapter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskExecutorAdapter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskExecutorAdapter.java
deleted file mode 100644
index f13c76a..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskExecutorAdapter.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor;
-
-import java.util.Collection;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopComponent;
-import org.apache.ignite.internal.processors.hadoop.HadoopJob;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-import org.apache.ignite.internal.processors.hadoop.jobtracker.HadoopJobMetadata;
-
-/**
- * Common superclass for task executor.
- */
-public abstract class HadoopTaskExecutorAdapter extends HadoopComponent {
-    /**
-     * Runs tasks.
-     *
-     * @param job Job.
-     * @param tasks Tasks.
-     * @throws IgniteCheckedException If failed.
-     */
-    public abstract void run(final HadoopJob job, Collection<HadoopTaskInfo> tasks) throws IgniteCheckedException;
-
-    /**
-     * Cancels all currently running tasks for given job ID and cancels scheduled execution of tasks
-     * for this job ID.
-     * <p>
-     * It is guaranteed that this method will not be called concurrently with
-     * {@link #run(org.apache.ignite.internal.processors.hadoop.HadoopJob, Collection)} method. No more job submissions will be performed via
-     * {@link #run(org.apache.ignite.internal.processors.hadoop.HadoopJob, Collection)} method for given job ID after this method is called.
-     *
-     * @param jobId Job ID to cancel.
-     */
-    public abstract void cancelTasks(HadoopJobId jobId) throws IgniteCheckedException;
-
-    /**
-     * On job state change callback.
-     *
-     * @param meta Job metadata.
-     */
-    public abstract void onJobStateChanged(HadoopJobMetadata meta) throws IgniteCheckedException;
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskState.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskState.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskState.java
deleted file mode 100644
index b22d291..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskState.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor;
-
-/**
- * State of the task.
- */
-public enum HadoopTaskState {
-    /** Running task. */
-    RUNNING,
-
-    /** Completed task. */
-    COMPLETED,
-
-    /** Failed task. */
-    FAILED,
-
-    /** Canceled task. */
-    CANCELED,
-
-    /** Process crashed. */
-    CRASHED
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskStatus.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskStatus.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskStatus.java
deleted file mode 100644
index fa09ff7..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskStatus.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.taskexecutor;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Task status.
- */
-public class HadoopTaskStatus implements Externalizable {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** */
-    private HadoopTaskState state;
-
-    /** */
-    private Throwable failCause;
-
-    /** */
-    private HadoopCounters cntrs;
-
-    /**
-     * Default constructor required by {@link Externalizable}.
-     */
-    public HadoopTaskStatus() {
-        // No-op.
-    }
-
-    /**
-     * Creates new instance.
-     *
-     * @param state Task state.
-     * @param failCause Failure cause (if any).
-     */
-    public HadoopTaskStatus(HadoopTaskState state, @Nullable Throwable failCause) {
-        this(state, failCause, null);
-    }
-
-    /**
-     * Creates new instance.
-     *
-     * @param state Task state.
-     * @param failCause Failure cause (if any).
-     * @param cntrs Task counters.
-     */
-    public HadoopTaskStatus(HadoopTaskState state, @Nullable Throwable failCause,
-        @Nullable HadoopCounters cntrs) {
-        assert state != null;
-
-        this.state = state;
-        this.failCause = failCause;
-        this.cntrs = cntrs;
-    }
-
-    /**
-     * @return State.
-     */
-    public HadoopTaskState state() {
-        return state;
-    }
-
-    /**
-     * @return Fail cause.
-     */
-    @Nullable public Throwable failCause() {
-        return failCause;
-    }
-
-    /**
-     * @return Counters.
-     */
-    @Nullable public HadoopCounters counters() {
-        return cntrs;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopTaskStatus.class, this);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        out.writeObject(state);
-        out.writeObject(failCause);
-        out.writeObject(cntrs);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        state = (HadoopTaskState)in.readObject();
-        failCause = (Throwable)in.readObject();
-        cntrs = (HadoopCounters)in.readObject();
-    }
-}
\ No newline at end of file


[07/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemAbstractSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemAbstractSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemAbstractSelfTest.java
deleted file mode 100644
index 93a924c..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemAbstractSelfTest.java
+++ /dev/null
@@ -1,2040 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import java.io.BufferedOutputStream;
-import java.io.Closeable;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.URI;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayDeque;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Deque;
-import java.util.EnumSet;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.CyclicBarrier;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.AbstractFileSystem;
-import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsStatus;
-import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathExistsException;
-import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteFileSystem;
-import org.apache.ignite.cache.CacheWriteSynchronizationMode;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.configuration.FileSystemConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils;
-import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
-import org.apache.ignite.internal.util.GridConcurrentHashSet;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.G;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lang.IgniteBiTuple;
-import org.apache.ignite.spi.communication.CommunicationSpi;
-import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
-import org.apache.ignite.testframework.GridTestUtils;
-import org.jetbrains.annotations.Nullable;
-import org.jsr166.ThreadLocalRandom8;
-
-import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
-import static org.apache.ignite.cache.CacheMode.PARTITIONED;
-import static org.apache.ignite.cache.CacheMode.REPLICATED;
-import static org.apache.ignite.events.EventType.EVT_JOB_MAPPED;
-import static org.apache.ignite.events.EventType.EVT_TASK_FAILED;
-import static org.apache.ignite.events.EventType.EVT_TASK_FINISHED;
-import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
-import static org.apache.ignite.igfs.IgfsMode.PROXY;
-
-/**
- * Abstract self test for a Hadoop 2.x compliant file system.
- */
-public abstract class HadoopIgfs20FileSystemAbstractSelfTest extends IgfsCommonAbstractTest {
-    /** Group size. */
-    public static final int GRP_SIZE = 128;
-
-    /** Thread count for multithreaded tests. */
-    private static final int THREAD_CNT = 8;
-
-    /** Secondary file system user. */
-    private static final String SECONDARY_FS_USER = "secondary-default";
-
-    /** IP finder. */
-    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
-
-    /** Barrier for multithreaded tests. */
-    private static CyclicBarrier barrier;
-
-    /** File system. */
-    private static AbstractFileSystem fs;
-
-    /** Default IGFS mode. */
-    protected IgfsMode mode;
-
-    /** Primary file system URI. */
-    protected URI primaryFsUri;
-
-    /** Primary file system configuration. */
-    protected Configuration primaryFsCfg;
-
-    /**
-     * Constructor.
-     *
-     * @param mode Default IGFS mode.
-     */
-    protected HadoopIgfs20FileSystemAbstractSelfTest(IgfsMode mode) {
-        this.mode = mode;
-    }
-
-    /**
-     * Gets primary file system URI path.
-     *
-     * @return Primary file system URI path.
-     */
-    protected abstract String primaryFileSystemUriPath();
-
-    /**
-     * Gets primary file system config path.
-     *
-     * @return Primary file system config path.
-     */
-    protected abstract String primaryFileSystemConfigPath();
-
-    /**
-     * Get primary IPC endpoint configuration.
-     *
-     * @param gridName Grid name.
-     * @return Primary IPC endpoint configuration.
-     */
-    protected abstract IgfsIpcEndpointConfiguration primaryIpcEndpointConfiguration(String gridName);
-
-    /**
-     * Gets secondary file system URI path.
-     *
-     * @return Secondary file system URI path.
-     */
-    protected abstract String secondaryFileSystemUriPath();
-
-    /**
-     * Gets secondary file system config path.
-     *
-     * @return Secondary file system config path.
-     */
-    protected abstract String secondaryFileSystemConfigPath();
-
-    /**
-     * Get secondary IPC endpoint configuration.
-     *
-     * @return Secondary IPC endpoint configuration.
-     */
-    protected abstract IgfsIpcEndpointConfiguration secondaryIpcEndpointConfiguration();
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTestsStarted() throws Exception {
-        startNodes();
-    }
-
-    /**
-     * Starts the nodes for this test.
-     *
-     * @throws Exception If failed.
-     */
-    private void startNodes() throws Exception {
-        if (mode != PRIMARY) {
-            // Start secondary IGFS.
-            FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
-
-            igfsCfg.setDataCacheName("partitioned");
-            igfsCfg.setMetaCacheName("replicated");
-            igfsCfg.setName("igfs_secondary");
-            igfsCfg.setIpcEndpointConfiguration(secondaryIpcEndpointConfiguration());
-            igfsCfg.setManagementPort(-1);
-            igfsCfg.setBlockSize(512 * 1024);
-            igfsCfg.setPrefetchBlocks(1);
-
-            CacheConfiguration cacheCfg = defaultCacheConfiguration();
-
-            cacheCfg.setName("partitioned");
-            cacheCfg.setCacheMode(PARTITIONED);
-            cacheCfg.setNearConfiguration(null);
-            cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-            cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(GRP_SIZE));
-            cacheCfg.setBackups(0);
-            cacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-            CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
-
-            metaCacheCfg.setName("replicated");
-            metaCacheCfg.setCacheMode(REPLICATED);
-            metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-            metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-            IgniteConfiguration cfg = new IgniteConfiguration();
-
-            cfg.setGridName("grid_secondary");
-
-            TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
-
-            discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
-
-            cfg.setDiscoverySpi(discoSpi);
-            cfg.setCacheConfiguration(metaCacheCfg, cacheCfg);
-            cfg.setFileSystemConfiguration(igfsCfg);
-            cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED);
-            cfg.setLocalHost(U.getLocalHost().getHostAddress());
-            cfg.setCommunicationSpi(communicationSpi());
-
-            G.start(cfg);
-        }
-
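-        // Start the four primary data nodes; the secondary IGFS grid, if configured, already runs as its own topology.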
-        startGrids(4);
-
-        awaitPartitionMapExchange();
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getTestGridName() {
-        return "grid";
-    }
-
-    /** {@inheritDoc} */
-    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
-        IgniteConfiguration cfg = super.getConfiguration(gridName);
-
-        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
-
-        discoSpi.setIpFinder(IP_FINDER);
-
-        cfg.setDiscoverySpi(discoSpi);
-        cfg.setCacheConfiguration(cacheConfiguration(gridName));
-        cfg.setFileSystemConfiguration(igfsConfiguration(gridName));
-        cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED);
-        cfg.setLocalHost("127.0.0.1");
-        cfg.setCommunicationSpi(communicationSpi());
-
-        return cfg;
-    }
-
-    /**
-     * Gets cache configuration.
-     *
-     * @param gridName Grid name.
-     * @return Cache configuration.
-     */
-    protected CacheConfiguration[] cacheConfiguration(String gridName) {
-        CacheConfiguration cacheCfg = defaultCacheConfiguration();
-
-        cacheCfg.setName("partitioned");
-        cacheCfg.setCacheMode(PARTITIONED);
-        cacheCfg.setNearConfiguration(null);
-        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(GRP_SIZE));
-        cacheCfg.setBackups(0);
-        cacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
-
-        metaCacheCfg.setName("replicated");
-        metaCacheCfg.setCacheMode(REPLICATED);
-        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        return new CacheConfiguration[] {metaCacheCfg, cacheCfg};
-    }
-
-    /**
-     * Gets IGFS configuration.
-     *
-     * @param gridName Grid name.
-     * @return IGFS configuration.
-     * @throws IgniteCheckedException If failed.
-     */
-    protected FileSystemConfiguration igfsConfiguration(String gridName) throws IgniteCheckedException {
-        FileSystemConfiguration cfg = new FileSystemConfiguration();
-
-        cfg.setDataCacheName("partitioned");
-        cfg.setMetaCacheName("replicated");
-        cfg.setName("igfs");
-        cfg.setPrefetchBlocks(1);
-        cfg.setMaxSpaceSize(64 * 1024 * 1024);
-        cfg.setDefaultMode(mode);
-
-        if (mode != PRIMARY)
-            cfg.setSecondaryFileSystem(new IgniteHadoopIgfsSecondaryFileSystem(secondaryFileSystemUriPath(),
-                secondaryFileSystemConfigPath(), SECONDARY_FS_USER));
-
-        cfg.setIpcEndpointConfiguration(primaryIpcEndpointConfiguration(gridName));
-        cfg.setManagementPort(-1);
-
-        cfg.setBlockSize(512 * 1024); // Together with the group blocks mapper (GRP_SIZE = 128) this yields 64M data groups per node.
-
-        return cfg;
-    }
-
-    /** @return Communication SPI. */
-    private CommunicationSpi communicationSpi() {
-        TcpCommunicationSpi commSpi = new TcpCommunicationSpi();
-
-        commSpi.setSharedMemoryPort(-1);
-
-        return commSpi;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTestsStopped() throws Exception {
-        G.stopAll(true);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTest() throws Exception {
-        primaryFsUri = new URI(primaryFileSystemUriPath());
-
-        primaryFsCfg = new Configuration();
-
-        primaryFsCfg.addResource(U.resolveIgniteUrl(primaryFileSystemConfigPath()));
-
-        UserGroupInformation ugi = UserGroupInformation.getBestUGI(null, getClientFsUser());
-
-        // Create Fs on behalf of the client user:
-        ugi.doAs(new PrivilegedExceptionAction<Object>() {
-            @Override public Object run() throws Exception {
-                fs = AbstractFileSystem.get(primaryFsUri, primaryFsCfg);
-
-                return null;
-            }
-        });
-
-        barrier = new CyclicBarrier(THREAD_CNT);
-    }
-
-    /**
-     * Gets the user the Fs client operates on behalf of.
-     * @return The user the Fs client operates on behalf of.
-     */
-    protected String getClientFsUser() {
-        return "foo";
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTest() throws Exception {
-        try {
-            HadoopIgfsUtils.clear(fs);
-        }
-        catch (Exception ignore) {
-            // No-op.
-        }
-
-        U.closeQuiet((Closeable)fs);
-    }
-
-    /** @throws Exception If failed. */
-    public void testStatus() throws Exception {
-        Path file1 = new Path("/file1");
-
-        try (FSDataOutputStream file = fs.create(file1, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()))) {
-            file.write(new byte[1024 * 1024]);
-        }
-
-        FsStatus status = fs.getFsStatus();
-
-        assertEquals(getClientFsUser(), fs.getFileStatus(file1).getOwner());
-
-        assertEquals(4, grid(0).cluster().nodes().size());
-
-        long used = 0, max = 0;
-
-        for (int i = 0; i < 4; i++) {
-            IgniteFileSystem igfs = grid(i).fileSystem("igfs");
-
-            IgfsMetrics metrics = igfs.metrics();
-
-            used += metrics.localSpaceSize();
-            max += metrics.maxSpaceSize();
-        }
-
-        assertEquals(used, status.getUsed());
-        assertEquals(max, status.getCapacity());
-    }
-
-    /** @throws Exception If failed. */
-    public void testTimes() throws Exception {
-        Path file = new Path("/file1");
-
-        long now = System.currentTimeMillis();
-
-        try (FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()))) {
-            os.write(new byte[1024 * 1024]);
-        }
-
-        FileStatus status = fs.getFileStatus(file);
-
-        assertTrue(status.getAccessTime() >= now);
-        assertTrue(status.getModificationTime() >= now);
-
-        long accessTime = now - 10 * 60 * 1000;
-        long modificationTime = now - 5 * 60 * 1000;
-
-        fs.setTimes(file, modificationTime, accessTime);
-
-        status = fs.getFileStatus(file);
-        assertEquals(accessTime, status.getAccessTime());
-        assertEquals(modificationTime, status.getModificationTime());
-
-        // Check listing is updated as well.
-        FileStatus[] files = fs.listStatus(new Path("/"));
-
-        assertEquals(1, files.length);
-
-        assertEquals(file.getName(), files[0].getPath().getName());
-        assertEquals(accessTime, files[0].getAccessTime());
-        assertEquals(modificationTime, files[0].getModificationTime());
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                fs.setTimes(new Path("/unknownFile"), 0, 0);
-
-                return null;
-            }
-        }, FileNotFoundException.class, null);
-    }
-
-    /** @throws Exception If failed. */
-    public void testCreateCheckParameters() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.create(null, EnumSet.noneOf(CreateFlag.class),
-                    Options.CreateOpts.perms(FsPermission.getDefault()));
-            }
-        }, NullPointerException.class, null);
-    }
-
-    /** @throws Exception If failed. */
-    public void testCreateBase() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
-        Path file = new Path(dir, "someFile");
-
-        assertPathDoesNotExist(fs, file);
-
-        FsPermission fsPerm = new FsPermission((short)0644);
-
-        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(fsPerm));
-
-        // Try to write something in file.
-        os.write("abc".getBytes());
-
-        os.close();
-
-        // Check file status.
-        FileStatus fileStatus = fs.getFileStatus(file);
-
-        assertFalse(fileStatus.isDirectory());
-        assertEquals(file, fileStatus.getPath());
-        assertEquals(fsPerm, fileStatus.getPermission());
-    }
-
-    /** @throws Exception If failed. */
-    public void testCreateCheckOverwrite() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
-        final Path file = new Path(dir, "someFile");
-
-        FSDataOutputStream out = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        out.close();
-
-        // Check intermediate directory permissions.
-        assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir).getPermission());
-        assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir.getParent()).getPermission());
-        assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir.getParent().getParent()).getPermission());
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.create(file, EnumSet.noneOf(CreateFlag.class),
-                    Options.CreateOpts.perms(FsPermission.getDefault()));
-            }
-        }, PathExistsException.class, null);
-
-        // Overwrite should be successful.
-        FSDataOutputStream out1 = fs.create(file, EnumSet.of(CreateFlag.OVERWRITE),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        out1.close();
-    }
-
-    /** @throws Exception If failed. */
-    public void testDeleteIfNoSuchPath() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
-
-        assertPathDoesNotExist(fs, dir);
-
-        assertFalse(fs.delete(dir, true));
-    }
-
-    /** @throws Exception If failed. */
-    public void testDeleteSuccessfulIfPathIsOpenedToRead() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path file = new Path(fsHome, "myFile");
-
-        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        final int cnt = 5 * FileSystemConfiguration.DFLT_BLOCK_SIZE; // Write 5 blocks.
-
-        for (int i = 0; i < cnt; i++)
-            os.writeInt(i);
-
-        os.close();
-
-        final FSDataInputStream is = fs.open(file, -1);
-
-        for (int i = 0; i < cnt / 2; i++)
-            assertEquals(i, is.readInt());
-
-        assert fs.delete(file, false);
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                fs.getFileStatus(file);
-
-                return null;
-            }
-        }, FileNotFoundException.class, null);
-
-        is.close();
-    }
-
-    /** @throws Exception If failed. */
-    public void testDeleteIfFilePathExists() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path file = new Path(fsHome, "myFile");
-
-        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        assertTrue(fs.delete(file, false));
-
-        assertPathDoesNotExist(fs, file);
-    }
-
-    /** @throws Exception If failed. */
-    public void testDeleteIfDirectoryPathExists() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
-
-        FSDataOutputStream os = fs.create(dir, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        assertTrue(fs.delete(dir, false));
-
-        assertPathDoesNotExist(fs, dir);
-    }
-
-    /** @throws Exception If failed. */
-    public void testDeleteFailsIfNonRecursive() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
-
-        FSDataOutputStream os = fs.create(someDir3, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        final Path someDir2 = new Path(fsHome, "/someDir1/someDir2");
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                fs.delete(someDir2, false);
-
-                return null;
-            }
-        }, PathIsNotEmptyDirectoryException.class, null);
-
-        assertPathExists(fs, someDir2);
-        assertPathExists(fs, someDir3);
-    }
-
-    /** @throws Exception If failed. */
-    public void testDeleteRecursively() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
-
-        FSDataOutputStream os = fs.create(someDir3, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        Path someDir2 = new Path(fsHome, "/someDir1/someDir2");
-
-        assertTrue(fs.delete(someDir2, true));
-
-        assertPathDoesNotExist(fs, someDir2);
-        assertPathDoesNotExist(fs, someDir3);
-    }
-
-    /** @throws Exception If failed. */
-    public void testDeleteRecursivelyFromRoot() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
-
-        FSDataOutputStream os = fs.create(someDir3, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        Path root = new Path(fsHome, "/");
-
-        assertFalse(fs.delete(root, true));
-
-        assertTrue(fs.delete(new Path(fsHome, "/someDir1"), true));
-
-        assertPathDoesNotExist(fs, someDir3);
-        assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1/someDir2"));
-        assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1"));
-        assertPathExists(fs, root);
-    }
-
-    /** @throws Exception If failed. */
-    public void testSetPermissionCheckDefaultPermission() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path file = new Path(fsHome, "/tmp/my");
-
-        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        fs.setPermission(file, null);
-
-        assertEquals(FsPermission.getDefault(), fs.getFileStatus(file).getPermission());
-        assertEquals(FsPermission.getDefault(), fs.getFileStatus(file.getParent()).getPermission());
-    }
-
-    /** @throws Exception If failed. */
-    public void testSetPermissionCheckNonRecursiveness() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path file = new Path(fsHome, "/tmp/my");
-
-        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        Path tmpDir = new Path(fsHome, "/tmp");
-
-        FsPermission perm = new FsPermission((short)123);
-
-        fs.setPermission(tmpDir, perm);
-
-        assertEquals(perm, fs.getFileStatus(tmpDir).getPermission());
-        assertEquals(FsPermission.getDefault(), fs.getFileStatus(file).getPermission());
-    }
-
-    /** @throws Exception If failed. */
-    @SuppressWarnings("OctalInteger")
-    public void testSetPermission() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path file = new Path(fsHome, "/tmp/my");
-
-        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        for (short i = 0; i <= 0777; i += 7) {
-            FsPermission perm = new FsPermission(i);
-
-            fs.setPermission(file, perm);
-
-            assertEquals(perm, fs.getFileStatus(file).getPermission());
-        }
-    }
-
-    /** @throws Exception If failed. */
-    public void testSetPermissionIfOutputStreamIsNotClosed() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path file = new Path(fsHome, "myFile");
-
-        FsPermission perm = new FsPermission((short)123);
-
-        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        fs.setPermission(file, perm);
-
-        os.close();
-
-        assertEquals(perm, fs.getFileStatus(file).getPermission());
-    }
-
-    /** @throws Exception If failed. */
-    public void testSetOwnerCheckParametersPathIsNull() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path file = new Path(fsHome, "/tmp/my");
-
-        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                fs.setOwner(null, "aUser", "aGroup");
-
-                return null;
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: p");
-    }
-
-    /** @throws Exception If failed. */
-    public void testSetOwnerCheckParametersUserIsNull() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path file = new Path(fsHome, "/tmp/my");
-
-        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                fs.setOwner(file, null, "aGroup");
-
-                return null;
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: username");
-    }
-
-    /** @throws Exception If failed. */
-    public void testSetOwnerCheckParametersGroupIsNull() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path file = new Path(fsHome, "/tmp/my");
-
-        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                fs.setOwner(file, "aUser", null);
-
-                return null;
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: grpName");
-    }
-
-    /** @throws Exception If failed. */
-    public void testSetOwner() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path file = new Path(fsHome, "/tmp/my");
-
-        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        assertEquals(getClientFsUser(), fs.getFileStatus(file).getOwner());
-
-        fs.setOwner(file, "aUser", "aGroup");
-
-        assertEquals("aUser", fs.getFileStatus(file).getOwner());
-        assertEquals("aGroup", fs.getFileStatus(file).getGroup());
-    }
-
-    /** @throws Exception If failed. */
-    public void testSetOwnerIfOutputStreamIsNotClosed() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path file = new Path(fsHome, "myFile");
-
-        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        fs.setOwner(file, "aUser", "aGroup");
-
-        os.close();
-
-        assertEquals("aUser", fs.getFileStatus(file).getOwner());
-        assertEquals("aGroup", fs.getFileStatus(file).getGroup());
-    }
-
-    /** @throws Exception If failed. */
-    public void testSetOwnerCheckNonRecursiveness() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path file = new Path(fsHome, "/tmp/my");
-
-        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        Path tmpDir = new Path(fsHome, "/tmp");
-
-        fs.setOwner(file, "fUser", "fGroup");
-        fs.setOwner(tmpDir, "dUser", "dGroup");
-
-        assertEquals("dUser", fs.getFileStatus(tmpDir).getOwner());
-        assertEquals("dGroup", fs.getFileStatus(tmpDir).getGroup());
-
-        assertEquals("fUser", fs.getFileStatus(file).getOwner());
-        assertEquals("fGroup", fs.getFileStatus(file).getGroup());
-    }
-
-    /** @throws Exception If failed. */
-    public void testOpenCheckParametersPathIsNull() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.open(null, 1024);
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
-    }
-
-    /** @throws Exception If failed. */
-    public void testOpenNoSuchPath() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path file = new Path(fsHome, "someFile");
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.open(file, 1024);
-            }
-        }, FileNotFoundException.class, null);
-    }
-
-    /** @throws Exception If failed. */
-    public void testOpenIfPathIsAlreadyOpened() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path file = new Path(fsHome, "someFile");
-
-        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        FSDataInputStream is1 = fs.open(file);
-        FSDataInputStream is2 = fs.open(file);
-
-        is1.close();
-        is2.close();
-    }
-
-    /** @throws Exception If failed. */
-    public void testOpen() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path file = new Path(fsHome, "someFile");
-
-        int cnt = 2 * 1024;
-
-        try (FSDataOutputStream out = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()))) {
-
-            for (long i = 0; i < cnt; i++)
-                out.writeLong(i);
-        }
-
-        assertEquals(getClientFsUser(), fs.getFileStatus(file).getOwner());
-
-        try (FSDataInputStream in = fs.open(file, 1024)) {
-
-            for (long i = 0; i < cnt; i++)
-                assertEquals(i, in.readLong());
-        }
-    }
-
-    /** @throws Exception If failed. */
-    public void testAppendIfPathPointsToDirectory() throws Exception {
-        final Path fsHome = new Path(primaryFsUri);
-        final Path dir = new Path(fsHome, "/tmp");
-        Path file = new Path(dir, "my");
-
-        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        GridTestUtils.assertThrowsInherited(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.create(new Path(fsHome, dir), EnumSet.of(CreateFlag.APPEND),
-                    Options.CreateOpts.perms(FsPermission.getDefault()));
-            }
-        }, IOException.class, null);
-    }
-
-    /** @throws Exception If failed. */
-    public void testAppendIfFileIsAlreadyBeingOpenedToWrite() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path file = new Path(fsHome, "someFile");
-
-        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        FSDataOutputStream appendOs = fs.create(file, EnumSet.of(CreateFlag.APPEND),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.create(file, EnumSet.of(CreateFlag.APPEND),
-                    Options.CreateOpts.perms(FsPermission.getDefault()));
-            }
-        }, IOException.class, null);
-
-        appendOs.close();
-    }
-
-    /** @throws Exception If failed. */
-    public void testAppend() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path file = new Path(fsHome, "someFile");
-
-        int cnt = 1024;
-
-        FSDataOutputStream out = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        for (int i = 0; i < cnt; i++)
-            out.writeLong(i);
-
-        out.close();
-
-        out = fs.create(file, EnumSet.of(CreateFlag.APPEND),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        for (int i = cnt; i < cnt * 2; i++)
-            out.writeLong(i);
-
-        out.close();
-
-        FSDataInputStream in = fs.open(file, 1024);
-
-        for (int i = 0; i < cnt * 2; i++)
-            assertEquals(i, in.readLong());
-
-        in.close();
-    }
-
-    /** @throws Exception If failed. */
-    public void testRenameCheckParametersSrcPathIsNull() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path file = new Path(fsHome, "someFile");
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                fs.rename(null, file);
-
-                return null;
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
-    }
-
-    /** @throws Exception If failed. */
-    public void testRenameCheckParametersDstPathIsNull() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path file = new Path(fsHome, "someFile");
-
-        fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault())).close();
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                fs.rename(file, null);
-
-                return null;
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
-    }
-
-    /** @throws Exception If failed. */
-    public void testRenameIfSrcPathDoesNotExist() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path srcFile = new Path(fsHome, "srcFile");
-        final Path dstFile = new Path(fsHome, "dstFile");
-
-        assertPathDoesNotExist(fs, srcFile);
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                fs.rename(srcFile, dstFile);
-
-                return null;
-            }
-        }, FileNotFoundException.class, null);
-
-        assertPathDoesNotExist(fs, dstFile);
-    }
-
-    /** @throws Exception If failed. */
-    public void testRenameIfSrcPathIsAlreadyBeingOpenedToWrite() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path srcFile = new Path(fsHome, "srcFile");
-        Path dstFile = new Path(fsHome, "dstFile");
-
-        FSDataOutputStream os = fs.create(srcFile, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        os = fs.create(srcFile, EnumSet.of(CreateFlag.APPEND),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        fs.rename(srcFile, dstFile);
-
-        assertPathExists(fs, dstFile);
-
-        String testStr = "Test";
-
-        try {
-            os.writeBytes(testStr);
-        }
-        finally {
-            os.close();
-        }
-
-        try (FSDataInputStream is = fs.open(dstFile)) {
-            byte[] buf = new byte[testStr.getBytes().length];
-
-            is.readFully(buf);
-
-            assertEquals(testStr, new String(buf));
-        }
-    }
-
-    /** @throws Exception If failed. */
-    public void testRenameFileIfDstPathExists() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        final Path srcFile = new Path(fsHome, "srcFile");
-        final Path dstFile = new Path(fsHome, "dstFile");
-
-        FSDataOutputStream os = fs.create(srcFile, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        os = fs.create(dstFile, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                fs.rename(srcFile, dstFile);
-
-                return null;
-            }
-        }, FileAlreadyExistsException.class, null);
-
-        assertPathExists(fs, srcFile);
-        assertPathExists(fs, dstFile);
-    }
-
-    /** @throws Exception If failed. */
-    public void testRenameFile() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path srcFile = new Path(fsHome, "/tmp/srcFile");
-        Path dstFile = new Path(fsHome, "/tmp/dstFile");
-
-        FSDataOutputStream os = fs.create(srcFile, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        fs.rename(srcFile, dstFile);
-
-        assertPathDoesNotExist(fs, srcFile);
-        assertPathExists(fs, dstFile);
-    }
-
-    /** @throws Exception If failed. */
-    public void testRenameIfSrcPathIsAlreadyBeingOpenedToRead() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path srcFile = new Path(fsHome, "srcFile");
-        Path dstFile = new Path(fsHome, "dstFile");
-
-        FSDataOutputStream os = fs.create(srcFile, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        int cnt = 1024;
-
-        for (int i = 0; i < cnt; i++)
-            os.writeInt(i);
-
-        os.close();
-
-        FSDataInputStream is = fs.open(srcFile);
-
-        for (int i = 0; i < cnt; i++) {
-            if (i == 100)
-                // Rename file during the read process.
-                fs.rename(srcFile, dstFile);
-
-            assertEquals(i, is.readInt());
-        }
-
-        assertPathDoesNotExist(fs, srcFile);
-        assertPathExists(fs, dstFile);
-
-        is.close();
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testRenameDirectoryIfDstPathExists() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path srcDir = new Path(fsHome, "/tmp/");
-        Path dstDir = new Path(fsHome, "/tmpNew/");
-
-        FSDataOutputStream os = fs.create(new Path(srcDir, "file1"), EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        os = fs.create(new Path(dstDir, "file2"), EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        try {
-            fs.rename(srcDir, dstDir);
-
-            fail("FileAlreadyExistsException expected.");
-        }
-        catch (FileAlreadyExistsException ignore) {
-            // No-op.
-        }
-
-        // Check all the files stay unchanged:
-        assertPathExists(fs, dstDir);
-        assertPathExists(fs, new Path(dstDir, "file2"));
-
-        assertPathExists(fs, srcDir);
-        assertPathExists(fs, new Path(srcDir, "file1"));
-    }
-
-    /** @throws Exception If failed. */
-    public void testRenameDirectory() throws Exception {
-        Path fsHome = new Path(primaryFsUri);
-        Path dir = new Path(fsHome, "/tmp/");
-        Path newDir = new Path(fsHome, "/tmpNew/");
-
-        FSDataOutputStream os = fs.create(new Path(dir, "myFile"), EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        os.close();
-
-        fs.rename(dir, newDir);
-
-        assertPathDoesNotExist(fs, dir);
-        assertPathExists(fs, newDir);
-    }
-
-    /** @throws Exception If failed. */
-    public void testListStatusIfPathIsNull() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.listStatus(null);
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
-    }
-
-    /** @throws Exception If failed. */
-    public void testListStatusIfPathDoesNotExist() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.listStatus(new Path("/someDir"));
-            }
-        }, FileNotFoundException.class, null);
-    }
-
-    /**
-     * Test directory listing.
-     *
-     * @throws Exception If failed.
-     */
-    public void testListStatus() throws Exception {
-        Path igfsHome = new Path(primaryFsUri);
-
-        // Test listing of an empty directory.
-        Path dir = new Path(igfsHome, "dir");
-
-        fs.mkdir(dir, FsPermission.getDefault(), true);
-
-        FileStatus[] list = fs.listStatus(dir);
-
-        assert list.length == 0;
-
-        // Test listing of a not empty directory.
-        Path subDir = new Path(dir, "subDir");
-
-        fs.mkdir(subDir, FsPermission.getDefault(), true);
-
-        Path file = new Path(dir, "file");
-
-        FSDataOutputStream fos = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        fos.close();
-
-        list = fs.listStatus(dir);
-
-        assert list.length == 2;
-
-        String listRes1 = list[0].getPath().getName();
-        String listRes2 = list[1].getPath().getName();
-
-        assert "subDir".equals(listRes1) && "file".equals(listRes2) || "subDir".equals(listRes2) &&
-            "file".equals(listRes1);
-
-        // Test listing of a file.
-        list = fs.listStatus(file);
-
-        assert list.length == 1;
-
-        assert "file".equals(list[0].getPath().getName());
-    }
-
-    /** @throws Exception If failed. */
-    public void testMkdirsIfPathIsNull() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                fs.mkdir(null, FsPermission.getDefault(), true);
-
-                return null;
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
-    }
-
-    /** @throws Exception If failed. */
-    public void testMkdirsIfPermissionIsNull() throws Exception {
-        Path dir = new Path("/tmp");
-
-        fs.mkdir(dir, null, true);
-
-        assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir).getPermission());
-    }
-
-    /** @throws Exception If failed. */
-    @SuppressWarnings("OctalInteger")
-    public void testMkdirs() throws Exception {
-        Path fsHome = new Path(primaryFileSystemUriPath());
-        Path dir = new Path(fsHome, "/tmp/staging");
-        Path nestedDir = new Path(dir, "nested");
-
-        FsPermission dirPerm = FsPermission.createImmutable((short)0700);
-        FsPermission nestedDirPerm = FsPermission.createImmutable((short)111);
-
-        fs.mkdir(dir, dirPerm, true);
-        fs.mkdir(nestedDir, nestedDirPerm, true);
-
-        assertEquals(dirPerm, fs.getFileStatus(dir).getPermission());
-        assertEquals(nestedDirPerm, fs.getFileStatus(nestedDir).getPermission());
-
-        assertEquals(getClientFsUser(), fs.getFileStatus(dir).getOwner());
-        assertEquals(getClientFsUser(), fs.getFileStatus(nestedDir).getOwner());
-    }
-
-    /** @throws Exception If failed. */
-    public void testGetFileStatusIfPathIsNull() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.getFileStatus(null);
-            }
-        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
-    }
-
-    /** @throws Exception If failed. */
-    public void testGetFileStatusIfPathDoesNotExist() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.getFileStatus(new Path("someDir"));
-            }
-        }, FileNotFoundException.class, "File not found: someDir");
-    }
-
-    /** @throws Exception If failed. */
-    public void testGetFileBlockLocationsIfFileStatusIsNull() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                // Argument is checked by Hadoop.
-                return fs.getFileBlockLocations(null, 1, 2);
-            }
-        }, NullPointerException.class, null);
-    }
-
-    /** @throws Exception If failed. */
-    public void testGetFileBlockLocationsIfFileStatusReferenceNotExistingPath() throws Exception {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.getFileBlockLocations(new Path("/someFile"), 1, 2);
-            }
-        }, FileNotFoundException.class, null);
-    }
-
-    /** @throws Exception If failed. */
-    public void testGetFileBlockLocations() throws Exception {
-        Path igfsHome = new Path(primaryFsUri);
-
-        Path file = new Path(igfsHome, "someFile");
-
-        try (OutputStream out = new BufferedOutputStream(fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault())))) {
-            byte[] data = new byte[128 * 1024];
-
-            for (int i = 0; i < 100; i++)
-                out.write(data);
-
-            out.flush();
-        }
-
-        try (FSDataInputStream in = fs.open(file, 1024 * 1024)) {
-            byte[] data = new byte[128 * 1024];
-
-            int read;
-
-            do {
-                read = in.read(data);
-            }
-            while (read > 0);
-        }
-
-        FileStatus status = fs.getFileStatus(file);
-
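-        // GRP_SIZE (128) blocks of 512K each form one 64M affinity group, so the expected
-        // number of block locations is the ceiling of the file length over the group length.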
-        int grpLen = 128 * 512 * 1024;
-
-        int grpCnt = (int)((status.getLen() + grpLen - 1) / grpLen);
-
-        BlockLocation[] locations = fs.getFileBlockLocations(file, 0, status.getLen());
-
-        assertEquals(grpCnt, locations.length);
-    }
-
-    /** @throws Exception If failed. */
-    public void testZeroReplicationFactor() throws Exception {
-        // This test doesn't make sense for any mode except of PRIMARY.
-        if (mode == PRIMARY) {
-            Path igfsHome = new Path(primaryFsUri);
-
-            Path file = new Path(igfsHome, "someFile");
-
-            try (FSDataOutputStream out = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-                Options.CreateOpts.perms(FsPermission.getDefault()), Options.CreateOpts.repFac((short)1))) {
-                out.write(new byte[1024 * 1024]);
-            }
-
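-            // With replication factor 1 and zero cache backups, all blocks of the file should land on a single node.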
-            IgniteFileSystem igfs = grid(0).fileSystem("igfs");
-
-            IgfsPath filePath = new IgfsPath("/someFile");
-
-            IgfsFile fileInfo = igfs.info(filePath);
-
-            Collection<IgfsBlockLocation> locations = igfs.affinity(filePath, 0, fileInfo.length());
-
-            assertEquals(1, locations.size());
-
-            IgfsBlockLocation location = F.first(locations);
-
-            assertEquals(1, location.nodeIds().size());
-        }
-    }
-
-    /**
-     * Ensure that when running in multithreaded mode only one create() operation succeed.
-     *
-     * @throws Exception If failed.
-     */
-    public void testMultithreadedCreate() throws Exception {
-        Path dir = new Path(new Path(primaryFsUri), "/dir");
-
-        fs.mkdir(dir, FsPermission.getDefault(), true);
-
-        final Path file = new Path(dir, "file");
-
-        fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault())).close();
-
-        final AtomicInteger cnt = new AtomicInteger();
-
-        final Collection<Integer> errs = new GridConcurrentHashSet<>(THREAD_CNT, 1.0f, THREAD_CNT);
-
-        multithreaded(new Runnable() {
-            @Override public void run() {
-                int idx = cnt.getAndIncrement();
-
-                byte[] data = new byte[256];
-
-                Arrays.fill(data, (byte)idx);
-
-                FSDataOutputStream os = null;
-
-                try {
-                    os = fs.create(file, EnumSet.of(CreateFlag.OVERWRITE),
-                        Options.CreateOpts.perms(FsPermission.getDefault()));
-
-                    os.write(data);
-                }
-                catch (IOException ignore) {
-                    errs.add(idx);
-                }
-                finally {
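-                    // Keep the stream open until every thread reaches the barrier, so all create() calls contend for the same write lock.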
-                    U.awaitQuiet(barrier);
-
-                    U.closeQuiet(os);
-                }
-            }
-        }, THREAD_CNT);
-
-        // Only one thread could obtain write lock on the file.
-        assert errs.size() == THREAD_CNT - 1 : "Invalid errors count [expected=" + (THREAD_CNT - 1) + ", actual=" +
-            errs.size() + ']';
-
-        int idx = -1;
-
-        for (int i = 0; i < THREAD_CNT; i++) {
-            if (!errs.remove(i)) {
-                idx = i;
-
-                break;
-            }
-        }
-
-        byte[] expData = new byte[256];
-
-        Arrays.fill(expData, (byte)idx);
-
-        FSDataInputStream is = fs.open(file);
-
-        byte[] data = new byte[256];
-
-        is.read(data);
-
-        is.close();
-
-        assert Arrays.equals(expData, data);
-    }
-
-    /**
-     * Ensure that when running in multithreaded mode only one append() operation succeed.
-     *
-     * @throws Exception If failed.
-     */
-    public void testMultithreadedAppend() throws Exception {
-        Path dir = new Path(new Path(primaryFsUri), "/dir");
-
-        fs.mkdir(dir, FsPermission.getDefault(), true);
-
-        final Path file = new Path(dir, "file");
-
-        fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault())).close();
-
-        final AtomicInteger cnt = new AtomicInteger();
-
-        final Collection<Integer> errs = new GridConcurrentHashSet<>(THREAD_CNT, 1.0f, THREAD_CNT);
-
-        multithreaded(new Runnable() {
-            @Override public void run() {
-                int idx = cnt.getAndIncrement();
-
-                byte[] data = new byte[256];
-
-                Arrays.fill(data, (byte)idx);
-
-                U.awaitQuiet(barrier);
-
-                FSDataOutputStream os = null;
-
-                try {
-                    os = fs.create(file, EnumSet.of(CreateFlag.APPEND),
-                        Options.CreateOpts.perms(FsPermission.getDefault()));
-
-                    os.write(data);
-                }
-                catch (IOException ignore) {
-                    errs.add(idx);
-                }
-                finally {
-                    U.awaitQuiet(barrier);
-
-                    U.closeQuiet(os);
-                }
-            }
-        }, THREAD_CNT);
-
-        // Only one thread could obtain write lock on the file.
-        assert errs.size() == THREAD_CNT - 1;
-
-        int idx = -1;
-
-        for (int i = 0; i < THREAD_CNT; i++) {
-            if (!errs.remove(i)) {
-                idx = i;
-
-                break;
-            }
-        }
-
-        byte[] expData = new byte[256];
-
-        Arrays.fill(expData, (byte)idx);
-
-        FSDataInputStream is = fs.open(file);
-
-        byte[] data = new byte[256];
-
-        is.read(data);
-
-        is.close();
-
-        assert Arrays.equals(expData, data);
-    }
-
-    /**
-     * Test concurrent reads within the file.
-     *
-     * @throws Exception If failed.
-     */
-    public void testMultithreadedOpen() throws Exception {
-        final byte[] dataChunk = new byte[256];
-
-        for (int i = 0; i < dataChunk.length; i++)
-            dataChunk[i] = (byte)i;
-
-        Path dir = new Path(new Path(primaryFsUri), "/dir");
-
-        fs.mkdir(dir, FsPermission.getDefault(), true);
-
-        final Path file = new Path(dir, "file");
-
-        FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault()));
-
-        // Write 2048 chunks of 256 bytes = 512KB of data.
-        for (int i = 0; i < 2048; i++)
-            os.write(dataChunk);
-
-        os.close();
-
-        final AtomicBoolean err = new AtomicBoolean();
-
-        multithreaded(new Runnable() {
-            @Override public void run() {
-                FSDataInputStream is = null;
-
-                try {
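-                    // The file was written as 2048 chunks of 256 bytes; pick a random chunk index to start reading from.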
-                    int pos = ThreadLocalRandom8.current().nextInt(2048);
-
-                    try {
-                        is = fs.open(file);
-                    }
-                    finally {
-                        U.awaitQuiet(barrier);
-                    }
-
-                    is.seek(256 * pos);
-
-                    byte[] buf = new byte[256];
-
-                    for (int i = pos; i < 2048; i++) {
-                        // First perform normal read.
-                        int read = is.read(buf);
-
-                        assert read == 256;
-
-                        assert Arrays.equals(dataChunk, buf);
-                    }
-
-                    int res = is.read(buf);
-
-                    assert res == -1;
-                }
-                catch (IOException ignore) {
-                    err.set(true);
-                }
-                finally {
-                    U.closeQuiet(is);
-                }
-            }
-        }, THREAD_CNT);
-
-        assert !err.get();
-    }
-
-    /**
-     * Test concurrent creation of multiple directories.
-     *
-     * @throws Exception If failed.
-     */
-    public void testMultithreadedMkdirs() throws Exception {
-        final Path dir = new Path(new Path("igfs:///"), "/dir");
-
-        fs.mkdir(dir, FsPermission.getDefault(), true);
-
-        final int depth = 3;
-        final int entryCnt = 5;
-
-        final AtomicBoolean err = new AtomicBoolean();
-
-        multithreaded(new Runnable() {
-            @Override public void run() {
-                Deque<IgniteBiTuple<Integer, Path>> queue = new ArrayDeque<>();
-
-                queue.add(F.t(0, dir));
-
-                U.awaitQuiet(barrier);
-
-                while (!queue.isEmpty()) {
-                    IgniteBiTuple<Integer, Path> t = queue.pollFirst();
-
-                    int curDepth = t.getKey();
-                    Path curPath = t.getValue();
-
-                    if (curDepth <= depth) {
-                        int newDepth = curDepth + 1;
-
-                        // Create directories.
-                        for (int i = 0; i < entryCnt; i++) {
-                            Path subDir = new Path(curPath, "dir-" + newDepth + "-" + i);
-
-                            try {
-                                fs.mkdir(subDir, FsPermission.getDefault(), true);
-                            }
-                            catch (IOException ignore) {
-                                err.set(true);
-                            }
-
-                            queue.addLast(F.t(newDepth, subDir));
-                        }
-                    }
-                }
-            }
-        }, THREAD_CNT);
-
-        // Ensure there were no errors.
-        assert !err.get();
-
-        // Ensure correct folders structure.
-        Deque<IgniteBiTuple<Integer, Path>> queue = new ArrayDeque<>();
-
-        queue.add(F.t(0, dir));
-
-        while (!queue.isEmpty()) {
-            IgniteBiTuple<Integer, Path> t = queue.pollFirst();
-
-            int curDepth = t.getKey();
-            Path curPath = t.getValue();
-
-            if (curDepth <= depth) {
-                int newDepth = curDepth + 1;
-
-                // Check that directories exist.
-                for (int i = 0; i < entryCnt; i++) {
-                    Path subDir = new Path(curPath, "dir-" + newDepth + "-" + i);
-
-                    assertNotNull(fs.getFileStatus(subDir));
-
-                    queue.add(F.t(newDepth, subDir));
-                }
-            }
-        }
-    }
-
-    /**
-     * Test concurrent deletion of the same directory with a nested structure.
-     *
-     * @throws Exception If failed.
-     */
-    @SuppressWarnings("TooBroadScope")
-    public void testMultithreadedDelete() throws Exception {
-        final Path dir = new Path(new Path(primaryFsUri), "/dir");
-
-        fs.mkdir(dir, FsPermission.getDefault(), true);
-
-        int depth = 3;
-        int entryCnt = 5;
-
-        Deque<IgniteBiTuple<Integer, Path>> queue = new ArrayDeque<>();
-
-        queue.add(F.t(0, dir));
-
-        while (!queue.isEmpty()) {
-            IgniteBiTuple<Integer, Path> t = queue.pollFirst();
-
-            int curDepth = t.getKey();
-            Path curPath = t.getValue();
-
-            if (curDepth < depth) {
-                int newDepth = curDepth + 1;
-
-                // Create directories.
-                for (int i = 0; i < entryCnt; i++) {
-                    Path subDir = new Path(curPath, "dir-" + newDepth + "-" + i);
-
-                    fs.mkdir(subDir, FsPermission.getDefault(), true);
-
-                    queue.addLast(F.t(newDepth, subDir));
-                }
-            }
-            else {
-                // Create files.
-                for (int i = 0; i < entryCnt; i++) {
-                    Path file = new Path(curPath, "file " + i);
-
-                    fs.create(file, EnumSet.noneOf(CreateFlag.class),
-                        Options.CreateOpts.perms(FsPermission.getDefault())).close();
-                }
-            }
-        }
-
-        final AtomicBoolean err = new AtomicBoolean();
-
-        multithreaded(new Runnable() {
-            @Override public void run() {
-                try {
-                    U.awaitQuiet(barrier);
-
-                    fs.delete(dir, true);
-                }
-                catch (FileNotFoundException ignore) {
-                    // No-op.
-                }
-                catch (IOException ignore) {
-                    err.set(true);
-                }
-            }
-        }, THREAD_CNT);
-
-        // Ensure there were no errors.
-        assert !err.get();
-
-        // Ensure the directory was actually deleted.
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                fs.getFileStatus(dir);
-
-                return null;
-            }
-        }, FileNotFoundException.class, null);
-    }
-
-    /** @throws Exception If failed. */
-    public void testConsistency() throws Exception {
-        // Default buffer values.
-        checkConsistency(-1, 1, -1, -1, 1, -1);
-        checkConsistency(-1, 10, -1, -1, 10, -1);
-        checkConsistency(-1, 100, -1, -1, 100, -1);
-        checkConsistency(-1, 1000, -1, -1, 1000, -1);
-        checkConsistency(-1, 10000, -1, -1, 10000, -1);
-        checkConsistency(-1, 100000, -1, -1, 100000, -1);
-
-        checkConsistency(65 * 1024 + 13, 100000, -1, -1, 100000, -1);
-
-        checkConsistency(-1, 100000, 2 * 4 * 1024 + 17, -1, 100000, -1);
-
-        checkConsistency(-1, 100000, -1, 65 * 1024 + 13, 100000, -1);
-
-        checkConsistency(-1, 100000, -1, -1, 100000, 2 * 4 * 1024 + 17);
-
-        checkConsistency(65 * 1024 + 13, 100000, 2 * 4 * 1024 + 13, 65 * 1024 + 149, 100000, 2 * 4 * 1024 + 157);
-    }
-
-    /**
-     * Verifies that client reconnects after connection to the server has been lost.
-     *
-     * @throws Exception If error occurs.
-     */
-    public void testClientReconnect() throws Exception {
-        final Path igfsHome = new Path(primaryFsUri);
-
-        final Path filePath = new Path(igfsHome, "someFile");
-
-        final FSDataOutputStream s = fs.create(filePath, EnumSet.noneOf(CreateFlag.class),
-            Options.CreateOpts.perms(FsPermission.getDefault())); // Open stream before stopping IGFS.
-
-        try {
-            G.stopAll(true); // Stop the server.
-
-            startNodes(); // Start server again.
-
-            // Check that client is again operational.
-            fs.mkdir(new Path("igfs:///dir1/dir2"), FsPermission.getDefault(), true);
-
-            // However, streams opened before the disconnect should not be valid.
-            GridTestUtils.assertThrows(log, new Callable<Object>() {
-                @Nullable @Override public Object call() throws Exception {
-                    s.write("test".getBytes());
-
-                    s.flush();
-
-                    return null;
-                }
-            }, IOException.class, null);
-
-            GridTestUtils.assertThrows(log, new Callable<Object>() {
-                @Override public Object call() throws Exception {
-                    fs.getFileStatus(filePath);
-
-                    return null;
-                }
-            }, FileNotFoundException.class, null);
-        }
-        finally {
-            U.closeQuiet(s);
-        }
-    }
-
-    /**
-     * Verifies that client reconnects after connection to the server has been lost (multithreaded mode).
-     *
-     * @throws Exception If error occurs.
-     */
-    public void testClientReconnectMultithreaded() throws Exception {
-        final ConcurrentLinkedQueue<FileSystem> q = new ConcurrentLinkedQueue<>();
-
-        Configuration cfg = new Configuration();
-
-        for (Map.Entry<String, String> entry : primaryFsCfg)
-            cfg.set(entry.getKey(), entry.getValue());
-
-        cfg.setBoolean("fs.igfs.impl.disable.cache", true);
-
-        final int nClients = 16;
-
-        // Initialize clients.
-        for (int i = 0; i < nClients; i++)
-            q.add(FileSystem.get(primaryFsUri, cfg));
-
-        G.stopAll(true); // Stop the server.
-
-        startNodes(); // Start server again.
-
-        GridTestUtils.runMultiThreaded(new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                FileSystem fs = q.poll();
-
-                try {
-                    // Check that client is again operational.
-                    assertTrue(fs.mkdirs(new Path("igfs:///" + Thread.currentThread().getName())));
-
-                    return true;
-                }
-                finally {
-                    U.closeQuiet(fs);
-                }
-            }
-        }, nClients, "test-client");
-    }
-
-    /**
-     * Checks consistency of create --> open --> append --> open operations with different buffer sizes.
-     *
-     * @param createBufSize Buffer size used for file creation.
-     * @param writeCntsInCreate Number of writes performed during file creation.
-     * @param openAfterCreateBufSize Buffer size used to open the file after creation.
-     * @param appendBufSize Buffer size used for file appending.
-     * @param writeCntsInAppend Number of writes performed during file appending.
-     * @param openAfterAppendBufSize Buffer size used to open the file after appending.
-     * @throws Exception If failed.
-     */
-    private void checkConsistency(int createBufSize, int writeCntsInCreate, int openAfterCreateBufSize,
-        int appendBufSize, int writeCntsInAppend, int openAfterAppendBufSize) throws Exception {
-        final Path igfsHome = new Path(primaryFsUri);
-
-        Path file = new Path(igfsHome, "/someDir/someInnerDir/someFile");
-
-        if (createBufSize == -1)
-            createBufSize = fs.getServerDefaults().getFileBufferSize();
-
-        if (appendBufSize == -1)
-            appendBufSize = fs.getServerDefaults().getFileBufferSize();
-
-        FSDataOutputStream os = fs.create(file, EnumSet.of(CreateFlag.OVERWRITE),
-            Options.CreateOpts.perms(FsPermission.getDefault()), Options.CreateOpts.bufferSize(createBufSize));
-
-        for (int i = 0; i < writeCntsInCreate; i++)
-            os.writeInt(i);
-
-        os.close();
-
-        FSDataInputStream is = fs.open(file, openAfterCreateBufSize);
-
-        for (int i = 0; i < writeCntsInCreate; i++)
-            assertEquals(i, is.readInt());
-
-        is.close();
-
-        os = fs.create(file, EnumSet.of(CreateFlag.APPEND),
-            Options.CreateOpts.perms(FsPermission.getDefault()), Options.CreateOpts.bufferSize(appendBufSize));
-
-        for (int i = writeCntsInCreate; i < writeCntsInCreate + writeCntsInAppend; i++)
-            os.writeInt(i);
-
-        os.close();
-
-        is = fs.open(file, openAfterAppendBufSize);
-
-        for (int i = 0; i < writeCntsInCreate + writeCntsInAppend; i++)
-            assertEquals(i, is.readInt());
-
-        is.close();
-    }
-
-    /**
-     * Test expected failures for 'close' operation.
-     *
-     * @param fs File system to test.
-     * @param msg Expected exception message.
-     */
-    public void assertCloseFails(final FileSystem fs, String msg) {
-        GridTestUtils.assertThrows(log, new Callable() {
-            @Override public Object call() throws Exception {
-                fs.close();
-
-                return null;
-            }
-        }, IOException.class, msg);
-    }
-
-    /**
-     * Test expected failures for 'get content summary' operation.
-     *
-     * @param fs File system to test.
-     * @param path Path to evaluate content summary for.
-     */
-    private void assertContentSummaryFails(final FileSystem fs, final Path path) {
-        GridTestUtils.assertThrows(log, new Callable<ContentSummary>() {
-            @Override public ContentSummary call() throws Exception {
-                return fs.getContentSummary(path);
-            }
-        }, FileNotFoundException.class, null);
-    }
-
-    /**
-     * Assert that a given path exists in a given FileSystem.
-     *
-     * @param fs FileSystem to check.
-     * @param p Path to check.
-     * @throws IOException if the path does not exist.
-     */
-    private void assertPathExists(AbstractFileSystem fs, Path p) throws IOException {
-        FileStatus fileStatus = fs.getFileStatus(p);
-
-        assertEquals(p, fileStatus.getPath());
-        assertTrue(fileStatus.getModificationTime() != 0); // Modification time must be set.
-    }
-
-    /**
-     * Check path does not exist in a given FileSystem.
-     *
-     * @param fs FileSystem to check.
-     * @param path Path to check.
-     */
-    private void assertPathDoesNotExist(final AbstractFileSystem fs, final Path path) {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.getFileStatus(path);
-            }
-        }, FileNotFoundException.class, null);
-    }
-
-    /** Helper class to encapsulate source and destination folders. */
-    @SuppressWarnings({"PublicInnerClass", "PublicField"})
-    public static final class Config {
-        /** Source file system. */
-        public final AbstractFileSystem srcFs;
-
-        /** Source path to work with. */
-        public final Path src;
-
-        /** Destination file system. */
-        public final AbstractFileSystem destFs;
-
-        /** Destination path to work with. */
-        public final Path dest;
-
-        /**
-         * Copying task configuration.
-         *
-         * @param srcFs Source file system.
-         * @param src Source path.
-         * @param destFs Destination file system.
-         * @param dest Destination path.
-         */
-        public Config(AbstractFileSystem srcFs, Path src, AbstractFileSystem destFs, Path dest) {
-            this.srcFs = srcFs;
-            this.src = src;
-            this.destFs = destFs;
-            this.dest = dest;
-        }
-    }
-
-    /**
-     * Convert path for exception message testing purposes.
-     *
-     * @param path Path.
-     * @return Converted path.
-     * @throws Exception If failed.
-     */
-    private Path convertPath(Path path) throws Exception {
-        if (mode != PROXY)
-            return path;
-        else {
-            URI secondaryUri = new URI(secondaryFileSystemUriPath());
-
-            URI pathUri = path.toUri();
-
-            return new Path(new URI(pathUri.getScheme() != null ? secondaryUri.getScheme() : null,
-                pathUri.getAuthority() != null ? secondaryUri.getAuthority() : null, pathUri.getPath(), null, null));
-        }
-    }
-}
\ No newline at end of file
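
For reference, the multithreaded tests above (create, open, mkdirs, delete) all rely on the same idiom: every worker parks on a shared barrier so that the contended file system operation is attempted by all threads at nearly the same instant. Below is a minimal standalone sketch of that idiom; THREAD_CNT, the barrier field and the multithreaded() helper seen in the tests belong to the test framework, so the names here are illustrative stand-ins:

    import java.util.concurrent.CyclicBarrier;

    public class BarrierSketch {
        /** Number of concurrent workers (stand-in for the framework's THREAD_CNT). */
        private static final int THREAD_CNT = 4;

        /** Barrier releasing all workers into the contended operation at once. */
        private static final CyclicBarrier barrier = new CyclicBarrier(THREAD_CNT);

        public static void main(String[] args) throws Exception {
            Thread[] threads = new Thread[THREAD_CNT];

            for (int i = 0; i < THREAD_CNT; i++) {
                threads[i] = new Thread(new Runnable() {
                    @Override public void run() {
                        try {
                            barrier.await(); // Maximize the chance that the operations actually race.

                            // ... contended file system operation goes here ...
                        }
                        catch (Exception e) {
                            throw new RuntimeException(e);
                        }
                    }
                });

                threads[i].start();
            }

            for (Thread t : threads)
                t.join();
        }
    }

Note how in the create test the await sits in the finally block, so even threads that fail to obtain the write lock release their peers before closing the stream.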

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemLoopbackPrimarySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemLoopbackPrimarySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemLoopbackPrimarySelfTest.java
deleted file mode 100644
index ff5cd5b..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemLoopbackPrimarySelfTest.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
-import static org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT;
-
-/**
- * Tests Hadoop 2.x file system in primary mode.
- */
-public class HadoopIgfs20FileSystemLoopbackPrimarySelfTest extends HadoopIgfs20FileSystemAbstractSelfTest {
-    /**
-     * Creates test in primary mode.
-     */
-    public HadoopIgfs20FileSystemLoopbackPrimarySelfTest() {
-        super(PRIMARY);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected String primaryFileSystemUriPath() {
-        return "igfs://igfs:" + getTestGridName(0) + "@/";
-    }
-
-    /** {@inheritDoc} */
-    @Override protected String primaryFileSystemConfigPath() {
-        return "/modules/core/src/test/config/hadoop/core-site-loopback.xml";
-    }
-
-    /** {@inheritDoc} */
-    @Override protected IgfsIpcEndpointConfiguration primaryIpcEndpointConfiguration(final String gridName) {
-        IgfsIpcEndpointConfiguration cfg = new IgfsIpcEndpointConfiguration();
-
-        cfg.setType(IgfsIpcEndpointType.TCP);
-        cfg.setPort(DFLT_IPC_PORT + getTestGridIndex(gridName));
-
-        return cfg;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected String secondaryFileSystemUriPath() {
-        assert false;
-
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected String secondaryFileSystemConfigPath() {
-        assert false;
-
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected IgfsIpcEndpointConfiguration secondaryIpcEndpointConfiguration() {
-        assert false;
-
-        return null;
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemShmemPrimarySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemShmemPrimarySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemShmemPrimarySelfTest.java
deleted file mode 100644
index 2bc9eb8..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemShmemPrimarySelfTest.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
-import static org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT;
-
-/**
- * Tests Hadoop 2.x file system in primary mode.
- */
-public class HadoopIgfs20FileSystemShmemPrimarySelfTest extends HadoopIgfs20FileSystemAbstractSelfTest {
-    /**
-     * Creates test in primary mode.
-     */
-    public HadoopIgfs20FileSystemShmemPrimarySelfTest() {
-        super(PRIMARY);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected String primaryFileSystemUriPath() {
-        return "igfs://igfs:" + getTestGridName(0) + "@/";
-    }
-
-    /** {@inheritDoc} */
-    @Override protected String primaryFileSystemConfigPath() {
-        return "/modules/core/src/test/config/hadoop/core-site.xml";
-    }
-
-    /** {@inheritDoc} */
-    @Override protected IgfsIpcEndpointConfiguration primaryIpcEndpointConfiguration(final String gridName) {
-        IgfsIpcEndpointConfiguration cfg = new IgfsIpcEndpointConfiguration();
-
-        cfg.setType(IgfsIpcEndpointType.SHMEM);
-        cfg.setPort(DFLT_IPC_PORT + getTestGridIndex(gridName));
-
-        return cfg;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected String secondaryFileSystemUriPath() {
-        assert false;
-
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected String secondaryFileSystemConfigPath() {
-        assert false;
-
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected IgfsIpcEndpointConfiguration secondaryIpcEndpointConfiguration() {
-        assert false;
-
-        return null;
-    }
-}
\ No newline at end of file


http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopSkipListSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopSkipListSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopSkipListSelfTest.java
new file mode 100644
index 0000000..f70ef2f
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopSkipListSelfTest.java
@@ -0,0 +1,318 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.shuffle.collections;
+
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.Multimap;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Deque;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ConcurrentMap;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
+import org.apache.ignite.internal.util.GridRandom;
+import org.apache.ignite.internal.util.GridUnsafe;
+import org.apache.ignite.internal.util.io.GridDataInput;
+import org.apache.ignite.internal.util.io.GridUnsafeDataInput;
+import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
+import org.apache.ignite.internal.util.typedef.X;
+
+import static java.lang.Math.abs;
+import static java.lang.Math.ceil;
+import static java.lang.Math.max;
+
+/**
+ * Skip list tests.
+ */
+public class HadoopSkipListSelfTest extends HadoopAbstractMapTest {
+    /**
+     * Tests distribution of random skip list levels.
+     */
+    public void testLevel() {
+        Random rnd = new GridRandom();
+
+        int[] levelsCnts = new int[32];
+
+        int all = 10000;
+
+        for (int i = 0; i < all; i++) {
+            int level = HadoopSkipList.randomLevel(rnd);
+
+            levelsCnts[level]++;
+        }
+
+        X.println("Distribution: " + Arrays.toString(levelsCnts));
+
+        for (int level = 0; level < levelsCnts.length; level++) {
+            int exp = (level + 1) == levelsCnts.length ? 0 : all >>> (level + 1);
+
+            double precision = 0.72 / Math.max(32 >>> level, 1);
+
+            int sigma = max((int)ceil(precission * exp), 5);
+
+            X.println("Level: " + level + " exp: " + exp + " act: " + levelsCnts[level] + " precision: " + precission +
+                " sigma: " + sigma);
+
+            assertTrue(abs(exp - levelsCnts[level]) <= sigma); // Sometimes fails.
+        }
+    }
+
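+    /**
+     * @throws Exception If failed.
+     */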
+    public void testMapSimple() throws Exception {
+        GridUnsafeMemory mem = new GridUnsafeMemory(0);
+
+//        mem.listen(new GridOffHeapEventListener() {
+//            @Override public void onEvent(GridOffHeapEvent evt) {
+//                if (evt == GridOffHeapEvent.ALLOCATE)
+//                    U.dumpStack();
+//            }
+//        });
+
+        Random rnd = new Random();
+
+        int mapSize = 16 << rnd.nextInt(6);
+
+        HadoopJobInfo job = new JobInfo();
+
+        HadoopTaskContext taskCtx = new TaskContext();
+
+        HadoopMultimap m = new HadoopSkipList(job, mem);
+
+        HadoopConcurrentHashMultimap.Adder a = m.startAdding(taskCtx);
+
+        Multimap<Integer, Integer> mm = ArrayListMultimap.create();
+        Multimap<Integer, Integer> vis = ArrayListMultimap.create();
+
+        for (int i = 0, vals = 4 * mapSize + rnd.nextInt(25); i < vals; i++) {
+            int key = rnd.nextInt(mapSize);
+            int val = rnd.nextInt();
+
+            a.write(new IntWritable(key), new IntWritable(val));
+            mm.put(key, val);
+
+            X.println("k: " + key + " v: " + val);
+
+            a.close();
+
+            check(m, mm, vis, taskCtx);
+
+            a = m.startAdding(taskCtx);
+        }
+
+//        a.add(new IntWritable(10), new IntWritable(2));
+//        mm.put(10, 2);
+//        check(m, mm);
+
+        a.close();
+
+        X.println("Alloc: " + mem.allocatedSize());
+
+        m.close();
+
+        assertEquals(0, mem.allocatedSize());
+    }
+
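+    /**
+     * Checks multimap contents against the expected mapping.
+     *
+     * @param m Multimap to check.
+     * @param mm Expected mapping.
+     * @param vis Visitor output accumulator.
+     * @param taskCtx Task context.
+     * @throws Exception If failed.
+     */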
+    private void check(HadoopMultimap m, Multimap<Integer, Integer> mm,
+        final Multimap<Integer, Integer> vis, HadoopTaskContext taskCtx) throws Exception {
+        final HadoopTaskInput in = m.input(taskCtx);
+
+        Map<Integer, Collection<Integer>> mmm = mm.asMap();
+
+        int keys = 0;
+
+        int prevKey = Integer.MIN_VALUE;
+
+        while (in.next()) {
+            keys++;
+
+            IntWritable k = (IntWritable)in.key();
+
+            assertNotNull(k);
+
+            assertTrue(k.get() > prevKey);
+
+            prevKey = k.get();
+
+            Deque<Integer> vs = new LinkedList<>();
+
+            Iterator<?> it = in.values();
+
+            while (it.hasNext())
+                vs.addFirst(((IntWritable) it.next()).get());
+
+            Collection<Integer> exp = mmm.get(k.get());
+
+            assertEquals(exp, vs);
+        }
+
+        assertEquals(mmm.size(), keys);
+
+//!        assertEquals(m.keys(), keys);
+
+        // Check visitor.
+
+        final byte[] buf = new byte[4];
+
+        final GridDataInput dataInput = new GridUnsafeDataInput();
+
+        m.visit(false, new HadoopConcurrentHashMultimap.Visitor() {
+            /** */
+            IntWritable key = new IntWritable();
+
+            /** */
+            IntWritable val = new IntWritable();
+
+            @Override public void onKey(long keyPtr, int keySize) {
+                read(keyPtr, keySize, key);
+            }
+
+            @Override public void onValue(long valPtr, int valSize) {
+                read(valPtr, valSize, val);
+
+                vis.put(key.get(), val.get());
+            }
+
+            private void read(long ptr, int size, Writable w) {
+                assert size == 4 : size;
+
+                GridUnsafe.copyMemory(null, ptr, buf, GridUnsafe.BYTE_ARR_OFF, size);
+
+                dataInput.bytes(buf, size);
+
+                try {
+                    w.readFields(dataInput);
+                }
+                catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
+            }
+        });
+
+//        X.println("vis: " + vis);
+
+        assertEquals(mm, vis);
+
+        in.close();
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testMultiThreaded() throws Exception {
+        GridUnsafeMemory mem = new GridUnsafeMemory(0);
+
+        X.println("___ Started");
+
+        Random rnd = new GridRandom();
+
+        for (int i = 0; i < 20; i++) {
+            HadoopJobInfo job = new JobInfo();
+
+            final HadoopTaskContext taskCtx = new TaskContext();
+
+            final HadoopMultimap m = new HadoopSkipList(job, mem);
+
+            final ConcurrentMap<Integer, Collection<Integer>> mm = new ConcurrentHashMap<>();
+
+            X.println("___ MT");
+
+            multithreaded(new Callable<Object>() {
+                @Override public Object call() throws Exception {
+                    X.println("___ TH in");
+
+                    Random rnd = new GridRandom();
+
+                    IntWritable key = new IntWritable();
+                    IntWritable val = new IntWritable();
+
+                    HadoopMultimap.Adder a = m.startAdding(taskCtx);
+
+                    for (int i = 0; i < 50000; i++) {
+                        int k = rnd.nextInt(32000);
+                        int v = rnd.nextInt();
+
+                        key.set(k);
+                        val.set(v);
+
+                        a.write(key, val);
+
+                        Collection<Integer> list = mm.get(k);
+
+                        if (list == null) {
+                            list = new ConcurrentLinkedQueue<>();
+
+                            Collection<Integer> old = mm.putIfAbsent(k, list);
+
+                            if (old != null)
+                                list = old;
+                        }
+
+                        list.add(v);
+                    }
+
+                    a.close();
+
+                    X.println("___ TH out");
+
+                    return null;
+                }
+            }, 3 + rnd.nextInt(27));
+
+            HadoopTaskInput in = m.input(taskCtx);
+
+            int prevKey = Integer.MIN_VALUE;
+
+            while (in.next()) {
+                IntWritable key = (IntWritable)in.key();
+
+                assertTrue(key.get() > prevKey);
+
+                prevKey = key.get();
+
+                Iterator<?> valsIter = in.values();
+
+                Collection<Integer> vals = mm.remove(key.get());
+
+                assertNotNull(vals);
+
+                while (valsIter.hasNext()) {
+                    IntWritable val = (IntWritable) valsIter.next();
+
+                    assertTrue(vals.remove(val.get()));
+                }
+
+                assertTrue(vals.isEmpty());
+            }
+
+            in.close();
+            m.close();
+
+            assertEquals(0, mem.allocatedSize());
+        }
+    }
+}
\ No newline at end of file
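
The testLevel check above asserts a geometric distribution: out of all samples, roughly all >>> (level + 1) should land on each level, i.e. every extra level is half as likely as the previous one. HadoopSkipList.randomLevel itself is not part of this diff; a typical coin-flip implementation consistent with that expectation would look like this sketch (names are hypothetical):

    import java.util.Arrays;
    import java.util.Random;

    public class RandomLevelSketch {
        /** Returns a random level; level L occurs with probability 2^-(L + 1). */
        static int randomLevel(Random rnd, int maxLevels) {
            int level = 0;

            // Each "heads" in a fair coin flip promotes the node one level up.
            while (level + 1 < maxLevels && rnd.nextBoolean())
                level++;

            return level;
        }

        public static void main(String[] args) {
            int[] cnts = new int[32];

            Random rnd = new Random();

            for (int i = 0; i < 10000; i++)
                cnts[randomLevel(rnd, 32)]++;

            // Expect roughly 5000, 2500, 1250, ..., matching all >>> (level + 1) in the test.
            System.out.println(Arrays.toString(cnts));
        }
    }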

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataStreamSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataStreamSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataStreamSelfTest.java
new file mode 100644
index 0000000..dd571af
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataStreamSelfTest.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.shuffle.streams;
+
+import java.io.IOException;
+import java.util.Arrays;
+import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ * Tests for Hadoop shuffle data input and output streams.
+ */
+public class HadoopDataStreamSelfTest extends GridCommonAbstractTest {
+
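+    /**
+     * @throws IOException If failed.
+     */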
+    public void testStreams() throws IOException {
+        GridUnsafeMemory mem = new GridUnsafeMemory(0);
+
+        HadoopDataOutStream out = new HadoopDataOutStream(mem);
+
+        int size = 4 * 1024;
+
+        final long ptr = mem.allocate(size);
+
+        out.buffer().set(ptr, size);
+
+        out.writeBoolean(false);
+        out.writeBoolean(true);
+        out.writeBoolean(false);
+        out.write(17);
+        out.write(121);
+        out.write(0xfafa);
+        out.writeByte(17);
+        out.writeByte(121);
+        out.writeByte(0xfafa);
+        out.writeChar('z');
+        out.writeChar('o');
+        out.writeChar('r');
+        out.writeShort(100);
+        out.writeShort(Short.MIN_VALUE);
+        out.writeShort(Short.MAX_VALUE);
+        out.writeShort(65535);
+        out.writeShort(65536); // 0
+        out.writeInt(Integer.MAX_VALUE);
+        out.writeInt(Integer.MIN_VALUE);
+        out.writeInt(-1);
+        out.writeInt(0);
+        out.writeInt(1);
+        out.writeFloat(0.33f);
+        out.writeFloat(0.5f);
+        out.writeFloat(-0.7f);
+        out.writeFloat(Float.MAX_VALUE);
+        out.writeFloat(Float.MIN_VALUE);
+        out.writeFloat(Float.MIN_NORMAL);
+        out.writeFloat(Float.POSITIVE_INFINITY);
+        out.writeFloat(Float.NEGATIVE_INFINITY);
+        out.writeFloat(Float.NaN);
+        out.writeDouble(-12312312.3333333336666779);
+        out.writeDouble(123123.234);
+        out.writeDouble(Double.MAX_VALUE);
+        out.writeDouble(Double.MIN_VALUE);
+        out.writeDouble(Double.MIN_NORMAL);
+        out.writeDouble(Double.NEGATIVE_INFINITY);
+        out.writeDouble(Double.POSITIVE_INFINITY);
+        out.writeDouble(Double.NaN);
+        out.writeLong(Long.MAX_VALUE);
+        out.writeLong(Long.MIN_VALUE);
+        out.writeLong(0);
+        out.writeLong(-1L);
+        out.write(new byte[]{1,2,3});
+        out.write(new byte[]{0,1,2,3}, 1, 2);
+        out.writeUTF("mom washes rum");
+
+        HadoopDataInStream in = new HadoopDataInStream(mem);
+
+        in.buffer().set(ptr, out.buffer().pointer());
+
+        assertEquals(false, in.readBoolean());
+        assertEquals(true, in.readBoolean());
+        assertEquals(false, in.readBoolean());
+        assertEquals(17, in.read());
+        assertEquals(121, in.read());
+        assertEquals(0xfa, in.read());
+        assertEquals(17, in.readByte());
+        assertEquals(121, in.readByte());
+        assertEquals((byte)0xfa, in.readByte());
+        assertEquals('z', in.readChar());
+        assertEquals('o', in.readChar());
+        assertEquals('r', in.readChar());
+        assertEquals(100, in.readShort());
+        assertEquals(Short.MIN_VALUE, in.readShort());
+        assertEquals(Short.MAX_VALUE, in.readShort());
+        assertEquals(-1, in.readShort());
+        assertEquals(0, in.readShort());
+        assertEquals(Integer.MAX_VALUE, in.readInt());
+        assertEquals(Integer.MIN_VALUE, in.readInt());
+        assertEquals(-1, in.readInt());
+        assertEquals(0, in.readInt());
+        assertEquals(1, in.readInt());
+        assertEquals(0.33f, in.readFloat());
+        assertEquals(0.5f, in.readFloat());
+        assertEquals(-0.7f, in.readFloat());
+        assertEquals(Float.MAX_VALUE, in.readFloat());
+        assertEquals(Float.MIN_VALUE, in.readFloat());
+        assertEquals(Float.MIN_NORMAL, in.readFloat());
+        assertEquals(Float.POSITIVE_INFINITY, in.readFloat());
+        assertEquals(Float.NEGATIVE_INFINITY, in.readFloat());
+        assertEquals(Float.NaN, in.readFloat());
+        assertEquals(-12312312.3333333336666779, in.readDouble());
+        assertEquals(123123.234, in.readDouble());
+        assertEquals(Double.MAX_VALUE, in.readDouble());
+        assertEquals(Double.MIN_VALUE, in.readDouble());
+        assertEquals(Double.MIN_NORMAL, in.readDouble());
+        assertEquals(Double.NEGATIVE_INFINITY, in.readDouble());
+        assertEquals(Double.POSITIVE_INFINITY, in.readDouble());
+        assertEquals(Double.NaN, in.readDouble());
+        assertEquals(Long.MAX_VALUE, in.readLong());
+        assertEquals(Long.MIN_VALUE, in.readLong());
+        assertEquals(0, in.readLong());
+        assertEquals(-1, in.readLong());
+
+        byte[] b = new byte[3];
+
+        in.read(b);
+
+        assertTrue(Arrays.equals(new byte[]{1,2,3}, b));
+
+        b = new byte[4];
+
+        in.read(b, 1, 2);
+
+        assertTrue(Arrays.equals(new byte[]{0, 1, 2, 0}, b));
+
+        assertEquals("mom washes rum", in.readUTF());
+    }
+}
\ No newline at end of file
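
Several assertions in testStreams encode DataOutput truncation rules rather than anything specific to the Ignite streams: write(int) keeps only the low-order byte (so 0xfafa reads back as 0xfa) and writeShort keeps only the low 16 bits (so 65536 reads back as 0 and 65535 as -1). The same behavior can be reproduced with the plain java.io data streams:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class ByteTruncationSketch {
        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(bos);

            out.write(0xfafa);     // Only the low-order byte is kept: 0xfa.
            out.writeShort(65535); // Low 16 bits are 0xffff.
            out.writeShort(65536); // Low 16 bits are 0.

            DataInputStream in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));

            System.out.println(in.read());      // 250, i.e. 0xfa as an unsigned byte.
            System.out.println(in.readShort()); // -1, i.e. 0xffff sign-extended.
            System.out.println(in.readShort()); // 0.
        }
    }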

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopExecutorServiceTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopExecutorServiceTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopExecutorServiceTest.java
new file mode 100644
index 0000000..7dd045a
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopExecutorServiceTest.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor;
+
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicBoolean;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.jsr166.LongAdder8;
+
+/**
+ * Tests Hadoop executor service.
+ */
+public class HadoopExecutorServiceTest extends GridCommonAbstractTest {
+    /**
+     * @throws Exception If failed.
+     */
+    public void testExecutesAll() throws Exception {
+        final HadoopExecutorService exec = new HadoopExecutorService(log, "_GRID_NAME_", 10, 5);
+
+        for (int i = 0; i < 5; i++) {
+            final int loops = 5000;
+            int threads = 17;
+
+            final LongAdder8 sum = new LongAdder8();
+
+            multithreaded(new Callable<Object>() {
+                @Override public Object call() throws Exception {
+                    for (int i = 0; i < loops; i++) {
+                        exec.submit(new Callable<Void>() {
+                            @Override public Void call() throws Exception {
+                                sum.increment();
+
+                                return null;
+                            }
+                        });
+                    }
+
+                    return null;
+                }
+            }, threads);
+
+            while (exec.active() != 0) {
+                X.println("__ active: " + exec.active());
+
+                Thread.sleep(200);
+            }
+
+            assertEquals(threads * loops, sum.sum());
+
+            X.println("_ ok");
+        }
+
+        assertTrue(exec.shutdown(0));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testShutdown() throws Exception {
+        for (int i = 0; i < 5; i++) {
+            final HadoopExecutorService exec = new HadoopExecutorService(log, "_GRID_NAME_", 10, 5);
+
+            final LongAdder8 sum = new LongAdder8();
+
+            final AtomicBoolean finish = new AtomicBoolean();
+
+            IgniteInternalFuture<?> fut = multithreadedAsync(new Callable<Object>() {
+                @Override public Object call() throws Exception {
+                    while (!finish.get()) {
+                        exec.submit(new Callable<Void>() {
+                            @Override public Void call() throws Exception {
+                                sum.increment();
+
+                                return null;
+                            }
+                        });
+                    }
+
+                    return null;
+                }
+            }, 19);
+
+            Thread.sleep(200);
+
+            assertTrue(exec.shutdown(50));
+
+            long res = sum.sum();
+
+            assertTrue(res > 0);
+
+            finish.set(true);
+
+            fut.get();
+
+            assertEquals(res, sum.sum()); // Nothing was executed after shutdown.
+
+            X.println("_ ok");
+        }
+    }
+}
\ No newline at end of file
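
Both tests above count completed tasks with LongAdder8, the jsr166 backport of java.util.concurrent.atomic.LongAdder, which stripes its counter internally so hot increments from many threads stay cheap. A minimal sketch of the same counting pattern against the standard Java 8 class (class and variable names here are illustrative):

    import java.util.concurrent.atomic.LongAdder;

    public class AdderSketch {
        public static void main(String[] args) throws InterruptedException {
            final LongAdder sum = new LongAdder();

            Runnable task = new Runnable() {
                @Override public void run() {
                    for (int i = 0; i < 1000; i++)
                        sum.increment(); // No CAS retry storms, unlike a contended AtomicLong.
                }
            };

            Thread t1 = new Thread(task);
            Thread t2 = new Thread(task);

            t1.start();
            t2.start();

            t1.join();
            t2.join();

            System.out.println(sum.sum()); // 2000.
        }
    }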

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskExecutionSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskExecutionSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskExecutionSelfTest.java
new file mode 100644
index 0000000..ec33836
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskExecutionSelfTest.java
@@ -0,0 +1,232 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor.external;
+
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.util.UUID;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteFileSystem;
+import org.apache.ignite.configuration.HadoopConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.igfs.IgfsOutputStream;
+import org.apache.ignite.igfs.IgfsPath;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.hadoop.HadoopAbstractSelfTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.marshaller.jdk.JdkMarshaller;
+
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.createJobInfo;
+
+/**
+ * External task execution self test.
+ */
+public class HadoopExternalTaskExecutionSelfTest extends HadoopAbstractSelfTest {
+    /** {@inheritDoc} */
+    @Override protected boolean igfsEnabled() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-404");
+
+        startGrids(gridCount());
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopConfiguration hadoopConfiguration(String gridName) {
+        HadoopConfiguration cfg = super.hadoopConfiguration(gridName);
+
+        // TODO: IGNITE-404: Uncomment when fixed.
+        //cfg.setExternalExecution(true);
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        cfg.setMarshaller(new JdkMarshaller());
+
+        return cfg;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSimpleTaskSubmit() throws Exception {
+        String testInputFile = "/test";
+
+        prepareTestFile(testInputFile);
+
+        Configuration cfg = new Configuration();
+
+        setupFileSystems(cfg);
+
+        Job job = Job.getInstance(cfg);
+
+        job.setMapperClass(TestMapper.class);
+        job.setCombinerClass(TestReducer.class);
+        job.setReducerClass(TestReducer.class);
+
+        job.setMapOutputKeyClass(Text.class);
+        job.setMapOutputValueClass(IntWritable.class);
+        job.setOutputKeyClass(Text.class);
+        job.setOutputValueClass(IntWritable.class);
+
+        job.setNumReduceTasks(1);
+
+        FileInputFormat.setInputPaths(job, new Path("igfs://:" + getTestGridName(0) + "@/" + testInputFile));
+        FileOutputFormat.setOutputPath(job, new Path("igfs://:" + getTestGridName(0) + "@/output"));
+
+        job.setJarByClass(getClass());
+
+        IgniteInternalFuture<?> fut = grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 1),
+            createJobInfo(job.getConfiguration()));
+
+        fut.get();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testMapperException() throws Exception {
+        String testInputFile = "/test";
+
+        prepareTestFile(testInputFile);
+
+        Configuration cfg = new Configuration();
+
+        setupFileSystems(cfg);
+
+        Job job = Job.getInstance(cfg);
+
+        job.setMapperClass(TestFailingMapper.class);
+        job.setCombinerClass(TestReducer.class);
+        job.setReducerClass(TestReducer.class);
+
+        job.setMapOutputKeyClass(Text.class);
+        job.setMapOutputValueClass(IntWritable.class);
+        job.setOutputKeyClass(Text.class);
+        job.setOutputValueClass(IntWritable.class);
+
+        job.setNumReduceTasks(1);
+
+        FileInputFormat.setInputPaths(job, new Path("igfs://:" + getTestGridName(0) + "@/" + testInputFile));
+        FileOutputFormat.setOutputPath(job, new Path("igfs://:" + getTestGridName(0) + "@/output"));
+
+        job.setJarByClass(getClass());
+
+        IgniteInternalFuture<?> fut = grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 1),
+            createJobInfo(job.getConfiguration()));
+
+        try {
+            fut.get();
+        }
+        catch (IgniteCheckedException e) {
+            IOException exp = X.cause(e, IOException.class);
+
+            assertNotNull(exp);
+            assertEquals("Test failure", exp.getMessage());
+        }
+    }
+
+    /**
+     * @param filePath File path to prepare.
+     * @throws Exception If failed.
+     */
+    private void prepareTestFile(String filePath) throws Exception {
+        IgniteFileSystem igfs = grid(0).fileSystem(igfsName);
+
+        try (IgfsOutputStream out = igfs.create(new IgfsPath(filePath), true)) {
+            PrintWriter wr = new PrintWriter(new OutputStreamWriter(out));
+
+            for (int i = 0; i < 1000; i++)
+                wr.println("Hello, world: " + i);
+
+            wr.flush();
+        }
+    }
+
+    /**
+     * Test mapper.
+     */
+    private static class TestMapper extends Mapper<Object, Text, Text, IntWritable> {
+        /** One constant. */
+        private IntWritable one = new IntWritable(1);
+
+        /** Line constant. */
+        private Text line = new Text("line");
+
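+        /** {@inheritDoc} */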
+        @Override protected void map(Object key, Text val, Context ctx) throws IOException, InterruptedException {
+            ctx.write(line, one);
+        }
+    }
+
+    /**
+     * Failing mapper.
+     */
+    private static class TestFailingMapper extends Mapper<Object, Text, Text, IntWritable> {
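+        /** {@inheritDoc} */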
+        @Override protected void map(Object key, Text val, Context c) throws IOException, InterruptedException {
+            throw new IOException("Test failure");
+        }
+    }
+
+    /**
+     * Test reducer.
+     */
+    private static class TestReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
+        /** Line constant. */
+        private Text line = new Text("line");
+
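+        /** {@inheritDoc} */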
+        @Override protected void setup(Context ctx) throws IOException, InterruptedException {
+            super.setup(ctx);
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void reduce(Text key, Iterable<IntWritable> values, Context ctx)
+            throws IOException, InterruptedException {
+            int s = 0;
+
+            for (IntWritable val : values)
+                s += val.get();
+
+            System.out.println(">>>> Reduced: " + s);
+
+            ctx.write(line, new IntWritable(s));
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopExternalCommunicationSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopExternalCommunicationSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopExternalCommunicationSelfTest.java
new file mode 100644
index 0000000..851c3af
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopExternalCommunicationSelfTest.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.UUID;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.marshaller.Marshaller;
+import org.apache.ignite.marshaller.jdk.JdkMarshaller;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ * Tests Hadoop external communication component.
+ */
+public class HadoopExternalCommunicationSelfTest extends GridCommonAbstractTest {
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-404");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSimpleMessageSendingTcp() throws Exception {
+        checkSimpleMessageSending(false);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSimpleMessageSendingShmem() throws Exception {
+        checkSimpleMessageSending(true);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    private void checkSimpleMessageSending(boolean useShmem) throws Exception {
+        UUID parentNodeId = UUID.randomUUID();
+
+        Marshaller marsh = new JdkMarshaller();
+
+        IgniteLogger log = log();
+
+        HadoopExternalCommunication[] comms = new HadoopExternalCommunication[4];
+
+        try {
+            String name = "grid";
+
+            TestHadoopListener[] lsnrs = new TestHadoopListener[4];
+
+            int msgs = 10;
+
+            for (int i = 0; i < comms.length; i++) {
+                comms[i] = new HadoopExternalCommunication(parentNodeId, UUID.randomUUID(), marsh, log,
+                    Executors.newFixedThreadPool(1), name + i);
+
+                if (useShmem)
+                    comms[i].setSharedMemoryPort(14000);
+
+                lsnrs[i] = new TestHadoopListener(msgs);
+
+                comms[i].setListener(lsnrs[i]);
+
+                comms[i].start();
+            }
+
+            for (int r = 0; r < msgs; r++) {
+                for (int from = 0; from < comms.length; from++) {
+                    for (int to = 0; to < comms.length; to++) {
+                        if (from == to)
+                            continue;
+
+                        comms[from].sendMessage(comms[to].localProcessDescriptor(), new TestMessage(from, to));
+                    }
+                }
+            }
+
+            U.sleep(1000);
+
+            for (TestHadoopListener lsnr : lsnrs) {
+                lsnr.await(3_000);
+
+                assertEquals(String.valueOf(lsnr.messages()), msgs * (comms.length - 1), lsnr.messages().size());
+            }
+        }
+        finally {
+            for (HadoopExternalCommunication comm : comms) {
+                if (comm != null)
+                    comm.stop();
+            }
+        }
+    }
+
+    /**
+     * Test message listener.
+     */
+    private static class TestHadoopListener implements HadoopMessageListener {
+        /** Received messages (array list is safe because executor has one thread). */
+        private Collection<TestMessage> msgs = new ArrayList<>();
+
+        /** Await latch. */
+        private CountDownLatch receiveLatch;
+
+        /**
+         * @param msgs Number of messages to await.
+         */
+        private TestHadoopListener(int msgs) {
+            receiveLatch = new CountDownLatch(msgs);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onMessageReceived(HadoopProcessDescriptor desc, HadoopMessage msg) {
+            assert msg instanceof TestMessage;
+
+            msgs.add((TestMessage)msg);
+
+            receiveLatch.countDown();
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onConnectionLost(HadoopProcessDescriptor desc) {
+            // No-op.
+        }
+
+        /**
+         * @return Received messages.
+         */
+        public Collection<TestMessage> messages() {
+            return msgs;
+        }
+
+        /**
+         * @param millis Time to await.
+         * @throws InterruptedException If wait interrupted.
+         */
+        public void await(int millis) throws InterruptedException {
+            receiveLatch.await(millis, TimeUnit.MILLISECONDS);
+        }
+    }
+
+    /**
+     * Test message carrying sender and receiver indices.
+     */
+    private static class TestMessage implements HadoopMessage {
+        /** From index. */
+        private int from;
+
+        /** To index. */
+        private int to;
+
+        /**
+         * @param from From index.
+         * @param to To index.
+         */
+        private TestMessage(int from, int to) {
+            this.from = from;
+            this.to = to;
+        }
+
+        /**
+         * Required by {@link Externalizable}.
+         */
+        public TestMessage() {
+            // No-op.
+        }
+
+        /**
+         * @return From index.
+         */
+        public int from() {
+            return from;
+        }
+
+        /**
+         * @return To index.
+         */
+        public int to() {
+            return to;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void writeExternal(ObjectOutput out) throws IOException {
+            out.writeInt(from);
+            out.writeInt(to);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+            from = in.readInt();
+            to = in.readInt();
+        }
+    }
+}
\ No newline at end of file
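
For reference, the assertion in checkSimpleMessageSending() relies on simple full-mesh arithmetic: every
endpoint sends one message per round to each of the other endpoints. A minimal sketch of the expected count,
using the endpoint and round counts from the test above:

    int nodes = 4;   // comms.length in the test.
    int rounds = 10; // msgs in the test.

    // Each listener receives one message per round from every other endpoint.
    int expectedPerListener = rounds * (nodes - 1); // 10 * 3 = 30.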

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
new file mode 100644
index 0000000..603fd5b
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
@@ -0,0 +1,354 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.testsuites;
+
+import junit.framework.TestSuite;
+import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
+import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
+import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream;
+import org.apache.ignite.IgniteSystemProperties;
+import org.apache.ignite.client.hadoop.HadoopClientProtocolEmbeddedSelfTest;
+import org.apache.ignite.client.hadoop.HadoopClientProtocolSelfTest;
+import org.apache.ignite.hadoop.cache.HadoopTxConfigCacheTest;
+import org.apache.ignite.hadoop.fs.KerberosHadoopFileSystemFactorySelfTest;
+import org.apache.ignite.hadoop.util.BasicUserNameMapperSelfTest;
+import org.apache.ignite.hadoop.util.ChainedUserNameMapperSelfTest;
+import org.apache.ignite.hadoop.util.KerberosUserNameMapperSelfTest;
+import org.apache.ignite.igfs.Hadoop1OverIgfsDualAsyncTest;
+import org.apache.ignite.igfs.Hadoop1OverIgfsDualSyncTest;
+import org.apache.ignite.igfs.HadoopFIleSystemFactorySelfTest;
+import org.apache.ignite.igfs.HadoopIgfs20FileSystemLoopbackPrimarySelfTest;
+import org.apache.ignite.igfs.HadoopIgfsDualAsyncSelfTest;
+import org.apache.ignite.igfs.HadoopIgfsDualSyncSelfTest;
+import org.apache.ignite.igfs.HadoopSecondaryFileSystemConfigurationTest;
+import org.apache.ignite.igfs.IgfsEventsTestSuite;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemClientSelfTest;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemHandshakeSelfTest;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemLoggerSelfTest;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemLoggerStateSelfTest;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemLoopbackEmbeddedPrimarySelfTest;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemLoopbackEmbeddedSecondarySelfTest;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemLoopbackExternalDualAsyncSelfTest;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemSecondaryFileSystemInitializationSelfTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopClassLoaderTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopCommandLineTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopDefaultMapReducePlannerSelfTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopFileSystemsTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopGroupingTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobTrackerSelfTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopMapReduceEmbeddedSelfTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopMapReduceErrorResilienceTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopMapReduceTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopNoHadoopMapReduceTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopSerializationWrapperSelfTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopSnappyFullMapReduceTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopSnappyTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopSortingTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopSplitWrapperSelfTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskExecutionSelfTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopTasksV1Test;
+import org.apache.ignite.internal.processors.hadoop.HadoopTasksV2Test;
+import org.apache.ignite.internal.processors.hadoop.HadoopUserLibsSelfTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopV2JobSelfTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopValidationSelfTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopWeightedMapReducePlannerTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopWeightedPlannerMapReduceTest;
+import org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopConcurrentHashMultimapSelftest;
+import org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopHashMapSelfTest;
+import org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopSkipListSelfTest;
+import org.apache.ignite.internal.processors.hadoop.shuffle.streams.HadoopDataStreamSelfTest;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.net.URL;
+import java.net.URLConnection;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.List;
+
+import static org.apache.ignite.testframework.GridTestUtils.modeToPermissionSet;
+
+/**
+ * Test suite for the Hadoop MapReduce engine.
+ */
+public class IgniteHadoopTestSuite extends TestSuite {
+    /**
+     * @return Test suite.
+     * @throws Exception Thrown in case of failure.
+     */
+    public static TestSuite suite() throws Exception {
+        downloadHadoop();
+        downloadHive();
+
+        final ClassLoader ldr = TestSuite.class.getClassLoader();
+
+        TestSuite suite = new TestSuite("Ignite Hadoop MR Test Suite");
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopUserLibsSelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopDefaultMapReducePlannerSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopWeightedMapReducePlannerTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(BasicUserNameMapperSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(KerberosUserNameMapperSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(ChainedUserNameMapperSelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(KerberosHadoopFileSystemFactorySelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopSnappyTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopSnappyFullMapReduceTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopClassLoaderTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopIgfs20FileSystemLoopbackPrimarySelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopIgfsDualSyncSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopIgfsDualAsyncSelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(Hadoop1OverIgfsDualSyncTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(Hadoop1OverIgfsDualAsyncTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopFIleSystemFactorySelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoopbackExternalDualAsyncSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoopbackEmbeddedPrimarySelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoopbackEmbeddedSecondarySelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemSecondaryFileSystemInitializationSelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemClientSelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoggerStateSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoggerSelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemHandshakeSelfTest.class.getName())));
+
+        suite.addTest(IgfsEventsTestSuite.suiteNoarchOnly());
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopFileSystemsTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopValidationSelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopJobTrackerSelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopHashMapSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopDataStreamSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopConcurrentHashMultimapSelftest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopSkipListSelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopTaskExecutionSelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopV2JobSelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopSerializationWrapperSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopSplitWrapperSelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopTasksV1Test.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopTasksV2Test.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopMapReduceTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopWeightedPlannerMapReduceTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopNoHadoopMapReduceTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopMapReduceErrorResilienceTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopMapReduceEmbeddedSelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopSortingTest.class.getName())));
+
+//        suite.addTest(new TestSuite(ldr.loadClass(HadoopExternalTaskExecutionSelfTest.class.getName())));
+//        suite.addTest(new TestSuite(ldr.loadClass(HadoopExternalCommunicationSelfTest.class.getName())));
+//        suite.addTest(new TestSuite(ldr.loadClass(HadoopSortingExternalTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopGroupingTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopClientProtocolSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopClientProtocolEmbeddedSelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopCommandLineTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopSecondaryFileSystemConfigurationTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopTxConfigCacheTest.class.getName())));
+
+        return suite;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public static void downloadHive() throws Exception {
+        String ver = IgniteSystemProperties.getString("hive.version", "1.2.1");
+
+        X.println("Will use Hive version: " + ver);
+
+        String downloadPath = "hive/hive-" + ver + "/apache-hive-" + ver + "-bin.tar.gz";
+
+        download("Hive", "HIVE_HOME", downloadPath, "apache-hive-" + ver + "-bin");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public static void downloadHadoop() throws Exception {
+        String ver = IgniteSystemProperties.getString("hadoop.version", "2.4.1");
+
+        X.println("Will use Hadoop version: " + ver);
+
+        String downloadPath = "hadoop/core/hadoop-" + ver + "/hadoop-" + ver + ".tar.gz";
+
+        download("Hadoop", "HADOOP_HOME", downloadPath, "hadoop-" + ver);
+    }
+
+    /**
+     * Downloads and extracts an Apache product.
+     *
+     * @param appName Name of the application for log messages.
+     * @param homeVariable Name of the system property or environment variable pointing to the component's home
+     *      directory.
+     * @param downloadPath Relative download path of the tar package.
+     * @param destName Local directory name to install the component to.
+     * @throws Exception If failed.
+     */
+    private static void download(String appName, String homeVariable, String downloadPath, String destName)
+        throws Exception {
+        String homeVal = IgniteSystemProperties.getString(homeVariable);
+
+        if (!F.isEmpty(homeVal) && new File(homeVal).isDirectory()) {
+            X.println(homeVariable + " is set to: " + homeVal);
+
+            return;
+        }
+
+        List<String> urls = F.asList(
+            "http://archive.apache.org/dist/",
+            "http://apache-mirror.rbc.ru/pub/apache/",
+            "http://www.eu.apache.org/dist/",
+            "http://www.us.apache.org/dist/");
+
+        String tmpPath = System.getProperty("java.io.tmpdir");
+
+        X.println("tmp: " + tmpPath);
+
+        final File install = new File(tmpPath + File.separatorChar + "__hadoop");
+
+        final File home = new File(install, destName);
+
+        X.println("Setting " + homeVariable + " to " + home.getAbsolutePath());
+
+        System.setProperty(homeVariable, home.getAbsolutePath());
+
+        final File successFile = new File(home, "__success");
+
+        if (home.exists()) {
+            if (successFile.exists()) {
+                X.println(appName + " distribution already exists.");
+
+                return;
+            }
+
+            X.println(appName + " distribution is invalid and it will be deleted.");
+
+            if (!U.delete(home))
+                throw new IOException("Failed to delete directory: " + home.getAbsolutePath());
+        }
+
+        for (String url : urls) {
+            if (!(install.exists() || install.mkdirs()))
+                throw new IOException("Failed to create directory: " + install.getAbsolutePath());
+
+            URL u = new URL(url + downloadPath);
+
+            X.println("Attempting to download from: " + u);
+
+            try {
+                URLConnection c = u.openConnection();
+
+                c.connect();
+
+                try (TarArchiveInputStream in = new TarArchiveInputStream(new GzipCompressorInputStream(
+                    new BufferedInputStream(c.getInputStream(), 32 * 1024)))) {
+
+                    TarArchiveEntry entry;
+
+                    while ((entry = in.getNextTarEntry()) != null) {
+                        File dest = new File(install, entry.getName());
+
+                        if (entry.isDirectory()) {
+                            // mkdirs() returns false if the directory already exists, so check existence first.
+                            if (!(dest.exists() || dest.mkdirs()))
+                                throw new IllegalStateException("Failed to create directory: " +
+                                    dest.getAbsolutePath());
+                        }
+                        else if (entry.isSymbolicLink()) {
+                            // Important: in Hadoop installation there are symlinks, we need to create them:
+                            Path theLinkItself = Paths.get(install.getAbsolutePath(), entry.getName());
+
+                            Path linkTarget = Paths.get(entry.getLinkName());
+
+                            Files.createSymbolicLink(theLinkItself, linkTarget);
+                        }
+                        else {
+                            File parent = dest.getParentFile();
+
+                            if (!(parent.exists() || parent.mkdirs()))
+                                throw new IllegalStateException("Failed to create directory: " +
+                                    parent.getAbsolutePath());
+
+                            X.print(" [" + dest);
+
+                            try (BufferedOutputStream out = new BufferedOutputStream(new FileOutputStream(dest, false),
+                                    128 * 1024)) {
+                                U.copy(in, out);
+
+                                out.flush();
+                            }
+
+                            Files.setPosixFilePermissions(dest.toPath(), modeToPermissionSet(entry.getMode()));
+
+                            X.println("]");
+                        }
+                    }
+                }
+
+                if (successFile.createNewFile())
+                    return;
+            }
+            catch (Exception e) {
+                e.printStackTrace();
+
+                U.delete(home);
+            }
+        }
+
+        throw new IllegalStateException("Failed to install " + appName + ".");
+    }
+}
\ No newline at end of file
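
Since download() first consults the home variable via IgniteSystemProperties (which falls back from system
properties to environment variables), local runs can skip the download step entirely by pointing the variables
at existing installations. A sketch, with illustrative paths:

    // Hypothetical JVM arguments; the version properties come from downloadHadoop()/downloadHive():
    //   -Dhadoop.version=2.4.1 -DHADOOP_HOME=/opt/hadoop-2.4.1
    //   -Dhive.version=1.2.1   -DHIVE_HOME=/opt/apache-hive-1.2.1-bin

    // The same can be done programmatically before building the suite:
    System.setProperty("HADOOP_HOME", "/opt/hadoop-2.4.1");        // Skips the Hadoop download.
    System.setProperty("HIVE_HOME", "/opt/apache-hive-1.2.1-bin"); // Skips the Hive download.

    TestSuite suite = IgniteHadoopTestSuite.suite();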

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/testsuites/IgniteIgfsLinuxAndMacOSTestSuite.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/testsuites/IgniteIgfsLinuxAndMacOSTestSuite.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/testsuites/IgniteIgfsLinuxAndMacOSTestSuite.java
new file mode 100644
index 0000000..4ed1d65
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/testsuites/IgniteIgfsLinuxAndMacOSTestSuite.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.testsuites;
+
+import junit.framework.TestSuite;
+import org.apache.ignite.igfs.HadoopIgfs20FileSystemShmemPrimarySelfTest;
+import org.apache.ignite.igfs.IgfsEventsTestSuite;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemIpcCacheSelfTest;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemShmemEmbeddedDualAsyncSelfTest;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemShmemEmbeddedDualSyncSelfTest;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemShmemEmbeddedPrimarySelfTest;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemShmemEmbeddedSecondarySelfTest;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemShmemExternalDualAsyncSelfTest;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemShmemExternalDualSyncSelfTest;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemShmemExternalPrimarySelfTest;
+import org.apache.ignite.igfs.IgniteHadoopFileSystemShmemExternalSecondarySelfTest;
+import org.apache.ignite.internal.processors.igfs.IgfsServerManagerIpcEndpointRegistrationOnLinuxAndMacSelfTest;
+
+import static org.apache.ignite.testsuites.IgniteHadoopTestSuite.downloadHadoop;
+
+/**
+ * Test suite for Hadoop file system over Ignite cache.
+ * Contains tests which work on Linux and Mac OS platforms only.
+ */
+public class IgniteIgfsLinuxAndMacOSTestSuite extends TestSuite {
+    /**
+     * @return Test suite.
+     * @throws Exception Thrown in case of failure.
+     */
+    public static TestSuite suite() throws Exception {
+        downloadHadoop();
+
+        ClassLoader ldr = TestSuite.class.getClassLoader();
+
+        TestSuite suite = new TestSuite("Ignite IGFS Test Suite For Linux And Mac OS");
+
+        suite.addTest(new TestSuite(ldr.loadClass(IgfsServerManagerIpcEndpointRegistrationOnLinuxAndMacSelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemShmemExternalPrimarySelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemShmemExternalSecondarySelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemShmemExternalDualSyncSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemShmemExternalDualAsyncSelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemShmemEmbeddedPrimarySelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemShmemEmbeddedSecondarySelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemShmemEmbeddedDualSyncSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemShmemEmbeddedDualAsyncSelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemIpcCacheSelfTest.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopIgfs20FileSystemShmemPrimarySelfTest.class.getName())));
+
+        suite.addTest(IgfsEventsTestSuite.suite());
+
+        return suite;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/pom.xml
----------------------------------------------------------------------
diff --git a/modules/hadoop/pom.xml b/modules/hadoop/pom.xml
index a3f40e5..70b8d03 100644
--- a/modules/hadoop/pom.xml
+++ b/modules/hadoop/pom.xml
@@ -54,42 +54,6 @@
         </dependency>
 
         <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-annotations</artifactId>
-            <version>${hadoop.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-auth</artifactId>
-            <version>${hadoop.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-common</artifactId>
-            <version>${hadoop.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-hdfs</artifactId>
-            <version>${hadoop.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-mapreduce-client-common</artifactId>
-            <version>${hadoop.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-mapreduce-client-core</artifactId>
-            <version>${hadoop.version}</version>
-        </dependency>
-
-        <dependency>
             <groupId>log4j</groupId>
             <artifactId>log4j</artifactId>
         </dependency>

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/BasicHadoopFileSystemFactory.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/BasicHadoopFileSystemFactory.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/BasicHadoopFileSystemFactory.java
deleted file mode 100644
index a01bfaf..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/BasicHadoopFileSystemFactory.java
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.hadoop.fs;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
-import org.apache.ignite.hadoop.util.KerberosUserNameMapper;
-import org.apache.ignite.hadoop.util.UserNameMapper;
-import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
-import org.apache.ignite.internal.processors.igfs.IgfsUtils;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lifecycle.LifecycleAware;
-import org.jetbrains.annotations.Nullable;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.net.URL;
-import java.util.Arrays;
-
-/**
- * Simple Hadoop file system factory which delegates to {@code FileSystem.get()} on each call.
- * <p>
- * If {@code "fs.[prefix].impl.disable.cache"} is set to {@code true}, file system instances will be cached by Hadoop.
- */
-public class BasicHadoopFileSystemFactory implements HadoopFileSystemFactory, Externalizable, LifecycleAware {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** File system URI. */
-    private String uri;
-
-    /** File system config paths. */
-    private String[] cfgPaths;
-
-    /** User name mapper. */
-    private UserNameMapper usrNameMapper;
-
-    /** Configuration of the secondary filesystem, never null. */
-    protected transient Configuration cfg;
-
-    /** Resulting URI. */
-    protected transient URI fullUri;
-
-    /**
-     * Constructor.
-     */
-    public BasicHadoopFileSystemFactory() {
-        // No-op.
-    }
-
-    /** {@inheritDoc} */
-    @Override public final FileSystem get(String name) throws IOException {
-        String name0 = IgfsUtils.fixUserName(name);
-
-        if (usrNameMapper != null)
-            name0 = IgfsUtils.fixUserName(usrNameMapper.map(name0));
-
-        return getWithMappedName(name0);
-    }
-
-    /**
-     * Internal file system create routine.
-     *
-     * @param usrName User name.
-     * @return File system.
-     * @throws IOException If failed.
-     */
-    protected FileSystem getWithMappedName(String usrName) throws IOException {
-        assert cfg != null;
-
-        try {
-            // FileSystem.get() might delegate to ServiceLoader to get the list of file system implementations.
-            // And ServiceLoader is known to be sensitive to context classloader. Therefore, we change context
-            // classloader to classloader of current class to avoid strange class-cast-exceptions.
-            ClassLoader oldLdr = HadoopUtils.setContextClassLoader(getClass().getClassLoader());
-
-            try {
-                return create(usrName);
-            }
-            finally {
-                HadoopUtils.restoreContextClassLoader(oldLdr);
-            }
-        }
-        catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-
-            throw new IOException("Failed to create file system due to interrupt.", e);
-        }
-    }
-
-    /**
-     * Internal file system creation routine, invoked in correct class loader context.
-     *
-     * @param usrName User name.
-     * @return File system.
-     * @throws IOException If failed.
-     * @throws InterruptedException If the current thread is interrupted.
-     */
-    protected FileSystem create(String usrName) throws IOException, InterruptedException {
-        return FileSystem.get(fullUri, cfg, usrName);
-    }
-
-    /**
-     * Gets file system URI.
-     * <p>
-     * This URI will be used as the first argument when calling {@link FileSystem#get(URI, Configuration, String)}.
-     * <p>
-     * If not set, the default URI will be picked from the file system configuration using the
-     * {@link FileSystem#getDefaultUri(Configuration)} method.
-     *
-     * @return File system URI.
-     */
-    @Nullable public String getUri() {
-        return uri;
-    }
-
-    /**
-     * Sets file system URI. See {@link #getUri()} for more information.
-     *
-     * @param uri File system URI.
-     */
-    public void setUri(@Nullable String uri) {
-        this.uri = uri;
-    }
-
-    /**
-     * Gets paths to additional file system configuration files (e.g. core-site.xml).
-     * <p>
-     * A path could be either absolute or relative to the {@code IGNITE_HOME} environment variable.
-     * <p>
-     * All provided paths will be loaded in the order they are provided and then applied to {@link Configuration}.
-     * This means that path order might be important in some cases.
-     * <p>
-     * <b>NOTE!</b> The factory can be serialized and transferred to other machines where an instance of
-     * {@link IgniteHadoopFileSystem} resides. Corresponding paths must exist on these machines as well.
-     *
-     * @return Paths to file system configuration files.
-     */
-    @Nullable public String[] getConfigPaths() {
-        return cfgPaths;
-    }
-
-    /**
-     * Set paths to additional file system configuration files (e.g. core-site.xml). See {@link #getConfigPaths()} for
-     * more information.
-     *
-     * @param cfgPaths Paths to file system configuration files.
-     */
-    public void setConfigPaths(@Nullable String... cfgPaths) {
-        this.cfgPaths = cfgPaths;
-    }
-
-    /**
-     * Get optional user name mapper.
-     * <p>
-     * When IGFS is invoked from Hadoop, the user name is passed along the way to ensure that the request will be
-     * performed with the proper user context. The user name is passed in a simple form and doesn't contain any
-     * extended information, such as host, domain or Kerberos realm. You may use a name mapper to translate the
-     * plain user name to the full user name required by the security engine of the underlying file system.
-     * <p>
-     * For example, you may want to use {@link KerberosUserNameMapper} to map the user name from {@code "johndoe"}
-     * to {@code "johndoe@YOUR.REALM.COM"}.
-     *
-     * @return User name mapper.
-     */
-    @Nullable public UserNameMapper getUserNameMapper() {
-        return usrNameMapper;
-    }
-
-    /**
-     * Set optional user name mapper. See {@link #getUserNameMapper()} for more information.
-     *
-     * @param usrNameMapper User name mapper.
-     */
-    public void setUserNameMapper(@Nullable UserNameMapper usrNameMapper) {
-        this.usrNameMapper = usrNameMapper;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void start() throws IgniteException {
-        cfg = HadoopUtils.safeCreateConfiguration();
-
-        if (cfgPaths != null) {
-            for (String cfgPath : cfgPaths) {
-                if (cfgPath == null)
-                    throw new NullPointerException("Configuration path cannot be null: " + Arrays.toString(cfgPaths));
-                else {
-                    URL url = U.resolveIgniteUrl(cfgPath);
-
-                    if (url == null) {
-                        // If a configuration path is given, it should be resolvable:
-                        throw new IgniteException("Failed to resolve secondary file system configuration path " +
-                            "(ensure that it exists locally and you have read access to it): " + cfgPath);
-                    }
-
-                    cfg.addResource(url);
-                }
-            }
-        }
-
-        // If secondary fs URI is not given explicitly, try to get it from the configuration:
-        if (uri == null)
-            fullUri = FileSystem.getDefaultUri(cfg);
-        else {
-            try {
-                fullUri = new URI(uri);
-            }
-            catch (URISyntaxException use) {
-                throw new IgniteException("Failed to resolve secondary file system URI: " + uri);
-            }
-        }
-
-        if (usrNameMapper != null && usrNameMapper instanceof LifecycleAware)
-            ((LifecycleAware)usrNameMapper).start();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void stop() throws IgniteException {
-        if (usrNameMapper != null && usrNameMapper instanceof LifecycleAware)
-            ((LifecycleAware)usrNameMapper).stop();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        U.writeString(out, uri);
-
-        if (cfgPaths != null) {
-            out.writeInt(cfgPaths.length);
-
-            for (String cfgPath : cfgPaths)
-                U.writeString(out, cfgPath);
-        }
-        else
-            out.writeInt(-1);
-
-        out.writeObject(usrNameMapper);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        uri = U.readString(in);
-
-        int cfgPathsCnt = in.readInt();
-
-        if (cfgPathsCnt != -1) {
-            cfgPaths = new String[cfgPathsCnt];
-
-            for (int i = 0; i < cfgPathsCnt; i++)
-                cfgPaths[i] = U.readString(in);
-        }
-
-        usrNameMapper = (UserNameMapper)in.readObject();
-    }
-}
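
A minimal configuration sketch for the factory above; the HDFS URI and the core-site.xml path are illustrative
and must exist in the target environment:

    BasicHadoopFileSystemFactory factory = new BasicHadoopFileSystemFactory();

    // Passed as the first argument to FileSystem.get(URI, Configuration, String).
    factory.setUri("hdfs://namenode.example.com:9000");

    // Relative paths are resolved against IGNITE_HOME; the files must exist on every node using the factory.
    factory.setConfigPaths("config/hadoop/core-site.xml");

    // Optional: translate plain user names before the file system is created.
    factory.setUserNameMapper(new KerberosUserNameMapper());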

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/CachingHadoopFileSystemFactory.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/CachingHadoopFileSystemFactory.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/CachingHadoopFileSystemFactory.java
deleted file mode 100644
index bcbb082..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/CachingHadoopFileSystemFactory.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.hadoop.fs;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemsUtils;
-import org.apache.ignite.internal.processors.hadoop.fs.HadoopLazyConcurrentMap;
-
-import java.io.IOException;
-import java.net.URI;
-
-/**
- * Caching Hadoop file system factory. Caches {@link FileSystem} instances on a per-user basis. Doesn't rely on
- * built-in Hadoop {@code FileSystem} caching mechanics. A separate {@code FileSystem} instance is created for each
- * user instead.
- * <p>
- * This makes the cache instance resistant to concurrent calls to {@link FileSystem#close()} in other parts of the
- * user code. On the other hand, this might cause problems in some environments. E.g. if Kerberos is enabled, a call
- * to {@link FileSystem#get(URI, Configuration, String)} will refresh the Kerberos token. But this factory
- * implementation calls this method only once per user, which may lead to token expiration. In such cases it makes
- * sense to either use {@link BasicHadoopFileSystemFactory} or implement your own factory.
- */
-public class CachingHadoopFileSystemFactory extends BasicHadoopFileSystemFactory {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** Per-user file system cache. */
-    private final transient HadoopLazyConcurrentMap<String, FileSystem> cache = new HadoopLazyConcurrentMap<>(
-        new HadoopLazyConcurrentMap.ValueFactory<String, FileSystem>() {
-            @Override public FileSystem createValue(String key) throws IOException {
-                return CachingHadoopFileSystemFactory.super.getWithMappedName(key);
-            }
-        }
-    );
-
-    /**
-     * Public no-arg constructor.
-     */
-    public CachingHadoopFileSystemFactory() {
-        // No-op.
-    }
-
-    /** {@inheritDoc} */
-    @Override public FileSystem getWithMappedName(String name) throws IOException {
-        return cache.getOrCreate(name);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void start() throws IgniteException {
-        super.start();
-
-        // Disable caching.
-        cfg.setBoolean(HadoopFileSystemsUtils.disableFsCachePropertyName(fullUri.getScheme()), true);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void stop() throws IgniteException {
-        super.stop();
-
-        try {
-            cache.close();
-        }
-        catch (IgniteCheckedException ice) {
-            throw new IgniteException(ice);
-        }
-    }
-}
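
A usage sketch for the caching factory above, assuming the setFileSystemFactory() setter of
IgniteHadoopIgfsSecondaryFileSystem referenced elsewhere in this module; the URI is illustrative:

    CachingHadoopFileSystemFactory factory = new CachingHadoopFileSystemFactory();

    factory.setUri("hdfs://namenode.example.com:9000"); // Illustrative URI.

    // Per-user FileSystem instances are created lazily and kept until stop() closes the cache.
    IgniteHadoopIgfsSecondaryFileSystem secFs = new IgniteHadoopIgfsSecondaryFileSystem();

    secFs.setFileSystemFactory(factory);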

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/HadoopFileSystemFactory.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/HadoopFileSystemFactory.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/HadoopFileSystemFactory.java
deleted file mode 100644
index 5ad08ab..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/HadoopFileSystemFactory.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.hadoop.fs;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
-import org.apache.ignite.igfs.IgfsMode;
-import org.apache.ignite.lifecycle.LifecycleAware;
-
-import java.io.IOException;
-import java.io.Serializable;
-
-/**
- * Factory for Hadoop {@link FileSystem} used by {@link IgniteHadoopIgfsSecondaryFileSystem}.
- * <p>
- * The {@link #get(String)} method will be used whenever a call to a target {@code FileSystem} is required.
- * <p>
- * It is implementation-dependent whether to rely on the built-in Hadoop file system cache, implement a custom
- * caching facility, or not cache file systems at all.
- * <p>
- * A concrete factory may implement the {@link LifecycleAware} interface. In this case start and stop callbacks will
- * be performed by Ignite. You may want to implement some initialization or cleanup there.
- * <p>
- * Note that the factory extends the {@link Serializable} interface, as it might be necessary to transfer factories
- * over the wire to {@link IgniteHadoopFileSystem} if {@link IgfsMode#PROXY} is enabled for some file
- * system paths.
- */
-public interface HadoopFileSystemFactory extends Serializable {
-    /**
-     * Gets file system for the given user name.
-     *
-     * @param usrName User name.
-     * @return File system.
-     * @throws IOException In case of error.
-     */
-    public FileSystem get(String usrName) throws IOException;
-}
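
A minimal custom implementation sketch of the interface above, reusing the imports of the surrounding files
(org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.net.URI). The URI is illustrative;
a real factory would make it configurable:

    public class PlainHadoopFileSystemFactory implements HadoopFileSystemFactory {
        /** */
        private static final long serialVersionUID = 0L;

        /** {@inheritDoc} */
        @Override public FileSystem get(String usrName) throws IOException {
            try {
                // Create an uncached file system instance for the given user.
                return FileSystem.get(URI.create("hdfs://namenode.example.com:9000"),
                    new Configuration(), usrName);
            }
            catch (InterruptedException e) {
                Thread.currentThread().interrupt();

                throw new IOException("Interrupted while creating file system.", e);
            }
        }
    }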

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopFileSystemCounterWriter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopFileSystemCounterWriter.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopFileSystemCounterWriter.java
deleted file mode 100644
index 8085826..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopFileSystemCounterWriter.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.hadoop.fs;
-
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.Map;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopDefaultJobInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopJob;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounterWriter;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopPerformanceCounter;
-import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2Job;
-import org.apache.ignite.internal.processors.igfs.IgfsUtils;
-import org.apache.ignite.internal.util.typedef.T2;
-
-/**
- * Statistics writer implementation that writes counter info into any Hadoop file system.
- */
-public class IgniteHadoopFileSystemCounterWriter implements HadoopCounterWriter {
-    /** */
-    public static final String PERFORMANCE_COUNTER_FILE_NAME = "performance";
-
-    /** */
-    public static final String COUNTER_WRITER_DIR_PROPERTY = "ignite.counters.fswriter.directory";
-
-    /** */
-    private static final String USER_MACRO = "${USER}";
-
-    /** */
-    private static final String DEFAULT_COUNTER_WRITER_DIR = "/user/" + USER_MACRO;
-
-    /** {@inheritDoc} */
-    @Override public void write(HadoopJob job, HadoopCounters cntrs)
-        throws IgniteCheckedException {
-
-        Configuration hadoopCfg = HadoopUtils.safeCreateConfiguration();
-
-        final HadoopJobInfo jobInfo = job.info();
-
-        final HadoopJobId jobId = job.id();
-
-        for (Map.Entry<String, String> e : ((HadoopDefaultJobInfo)jobInfo).properties().entrySet())
-            hadoopCfg.set(e.getKey(), e.getValue());
-
-        String user = jobInfo.user();
-
-        user = IgfsUtils.fixUserName(user);
-
-        String dir = jobInfo.property(COUNTER_WRITER_DIR_PROPERTY);
-
-        if (dir == null)
-            dir = DEFAULT_COUNTER_WRITER_DIR;
-
-        Path jobStatPath = new Path(new Path(dir.replace(USER_MACRO, user)), jobId.toString());
-
-        HadoopPerformanceCounter perfCntr = HadoopPerformanceCounter.getCounter(cntrs, null);
-
-        try {
-            hadoopCfg.set(MRJobConfig.USER_NAME, user);
-
-            FileSystem fs = ((HadoopV2Job)job).fileSystem(jobStatPath.toUri(), hadoopCfg);
-
-            fs.mkdirs(jobStatPath);
-
-            try (PrintStream out = new PrintStream(fs.create(new Path(jobStatPath, PERFORMANCE_COUNTER_FILE_NAME)))) {
-                for (T2<String, Long> evt : perfCntr.evts()) {
-                    out.print(evt.get1());
-                    out.print(':');
-                    out.println(evt.get2().toString());
-                }
-
-                out.flush();
-            }
-        }
-        catch (IOException e) {
-            throw new IgniteCheckedException(e);
-        }
-    }
-}
\ No newline at end of file
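
The output location of the writer above is controlled per job by the "ignite.counters.fswriter.directory"
property, with "${USER}" expanding to the (fixed-up) job user name. A sketch with an illustrative directory:

    Configuration conf = new Configuration();

    // The performance file is then written to /stats/<user>/<jobId>/performance.
    conf.set("ignite.counters.fswriter.directory", "/stats/${USER}");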


[41/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskExecutor.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskExecutor.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskExecutor.java
new file mode 100644
index 0000000..dc5874d
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskExecutor.java
@@ -0,0 +1,976 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor.external;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.locks.ReentrantLock;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.hadoop.HadoopContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopJob;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobPhase;
+import org.apache.ignite.internal.processors.hadoop.HadoopMapReducePlan;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskType;
+import org.apache.ignite.internal.processors.hadoop.jobtracker.HadoopJobMetadata;
+import org.apache.ignite.internal.processors.hadoop.jobtracker.HadoopJobTracker;
+import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskExecutorAdapter;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskState;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskStatus;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.child.HadoopExternalProcessStarter;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication.HadoopExternalCommunication;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication.HadoopMessageListener;
+import org.apache.ignite.internal.util.GridSpinReadWriteLock;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.apache.ignite.internal.util.typedef.CI1;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.spi.IgnitePortProtocol;
+import org.jetbrains.annotations.Nullable;
+import org.jsr166.ConcurrentHashMap8;
+import org.jsr166.ConcurrentLinkedDeque8;
+
+import static org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskState.CRASHED;
+import static org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskState.FAILED;
+
+/**
+ * External process registry. Handles external process lifecycle.
+ */
+public class HadoopExternalTaskExecutor extends HadoopTaskExecutorAdapter {
+    /** Hadoop context. */
+    private HadoopContext ctx;
+
+    /** */
+    private String javaCmd;
+
+    /** Logger. */
+    private IgniteLogger log;
+
+    /** Node process descriptor. */
+    private HadoopProcessDescriptor nodeDesc;
+
+    /** Output base. */
+    private File outputBase;
+
+    /** Path separator. */
+    private String pathSep;
+
+    /** Hadoop external communication. */
+    private HadoopExternalCommunication comm;
+
+    /** Running processes mapped by process ID. */
+    private final ConcurrentMap<UUID, HadoopProcess> runningProcsByProcId = new ConcurrentHashMap8<>();
+
+    /** Running processes mapped by job ID. */
+    private final ConcurrentMap<HadoopJobId, HadoopProcess> runningProcsByJobId = new ConcurrentHashMap8<>();
+
+    /** Busy lock. */
+    private final GridSpinReadWriteLock busyLock = new GridSpinReadWriteLock();
+
+    /** Job tracker. */
+    private HadoopJobTracker jobTracker;
+
+    /** {@inheritDoc} */
+    @Override public void start(HadoopContext ctx) throws IgniteCheckedException {
+        this.ctx = ctx;
+
+        log = ctx.kernalContext().log(HadoopExternalTaskExecutor.class);
+
+        outputBase = U.resolveWorkDirectory("hadoop", false);
+
+        pathSep = System.getProperty("path.separator", U.isWindows() ? ";" : ":");
+
+        initJavaCommand();
+
+        comm = new HadoopExternalCommunication(
+            ctx.localNodeId(),
+            UUID.randomUUID(),
+            ctx.kernalContext().config().getMarshaller(),
+            log,
+            ctx.kernalContext().getSystemExecutorService(),
+            ctx.kernalContext().gridName());
+
+        comm.setListener(new MessageListener());
+
+        comm.start();
+
+        nodeDesc = comm.localProcessDescriptor();
+
+        ctx.kernalContext().ports().registerPort(nodeDesc.tcpPort(), IgnitePortProtocol.TCP,
+            HadoopExternalTaskExecutor.class);
+
+        if (nodeDesc.sharedMemoryPort() != -1)
+            ctx.kernalContext().ports().registerPort(nodeDesc.sharedMemoryPort(), IgnitePortProtocol.TCP,
+                HadoopExternalTaskExecutor.class);
+
+        jobTracker = ctx.jobTracker();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void stop(boolean cancel) {
+        busyLock.writeLock();
+
+        try {
+            comm.stop();
+        }
+        catch (IgniteCheckedException e) {
+            U.error(log, "Failed to gracefully stop external hadoop communication server (will shutdown anyway)", e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onJobStateChanged(final HadoopJobMetadata meta) {
+        final HadoopProcess proc = runningProcsByJobId.get(meta.jobId());
+
+        // If we have a local process for this job.
+        if (proc != null) {
+            if (log.isDebugEnabled())
+                log.debug("Updating job information for remote task process [proc=" + proc + ", meta=" + meta + ']');
+
+            if (meta.phase() == HadoopJobPhase.PHASE_COMPLETE) {
+                if (log.isDebugEnabled())
+                    log.debug("Completed job execution, will terminate child process [jobId=" + meta.jobId() +
+                        ", proc=" + proc + ']');
+
+                runningProcsByJobId.remove(meta.jobId());
+                runningProcsByProcId.remove(proc.descriptor().processId());
+
+                proc.terminate();
+
+                return;
+            }
+
+            if (proc.initFut.isDone()) {
+                if (!proc.initFut.isFailed())
+                    sendJobInfoUpdate(proc, meta);
+                else if (log.isDebugEnabled())
+                    log.debug("Failed to initialize child process (will skip job state notification) " +
+                        "[jobId=" + meta.jobId() + ", meta=" + meta + ']');
+            }
+            else {
+                proc.initFut.listen(new CI1<IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>>>() {
+                    @Override
+                    public void apply(IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>> f) {
+                        try {
+                            f.get();
+
+                            sendJobInfoUpdate(proc, meta);
+                        }
+                        catch (IgniteCheckedException e) {
+                            if (log.isDebugEnabled())
+                                log.debug("Failed to initialize child process (will skip job state notification) " +
+                                    "[jobId=" + meta.jobId() + ", meta=" + meta + ", err=" + e + ']');
+                        }
+
+                    }
+                });
+            }
+        }
+        else if (ctx.isParticipating(meta)) {
+            HadoopJob job;
+
+            try {
+                job = jobTracker.job(meta.jobId(), meta.jobInfo());
+            }
+            catch (IgniteCheckedException e) {
+                U.error(log, "Failed to get job: " + meta.jobId(), e);
+
+                return;
+            }
+
+            startProcess(job, meta.mapReducePlan());
+        }
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("ConstantConditions")
+    @Override public void run(final HadoopJob job, final Collection<HadoopTaskInfo> tasks) throws IgniteCheckedException {
+        if (!busyLock.tryReadLock()) {
+            if (log.isDebugEnabled())
+                log.debug("Failed to start hadoop tasks (grid is stopping, will ignore).");
+
+            return;
+        }
+
+        try {
+            HadoopProcess proc = runningProcsByJobId.get(job.id());
+
+            HadoopTaskType taskType = F.first(tasks).type();
+
+            if (taskType == HadoopTaskType.SETUP || taskType == HadoopTaskType.ABORT ||
+                taskType == HadoopTaskType.COMMIT) {
+                if (proc == null || proc.terminated()) {
+                    runningProcsByJobId.remove(job.id(), proc);
+
+                    // Start a new process for the maintenance task (SETUP/COMMIT/ABORT) since the
+                    // previous process was terminated.
+                    proc = startProcess(job, jobTracker.plan(job.id()));
+
+                    if (log.isDebugEnabled())
+                        log.debug("Starting new process for maintenance task [jobId=" + job.id() +
+                            ", proc=" + proc + ", taskType=" + taskType + ']');
+                }
+            }
+            else
+                assert proc != null : "Missing started process for task execution request: " + job.id() +
+                    ", tasks=" + tasks;
+
+            final HadoopProcess proc0 = proc;
+
+            proc.initFut.listen(new CI1<IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>>>() {
+                @Override public void apply(
+                    IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>> f) {
+                    if (!busyLock.tryReadLock())
+                        return;
+
+                    try {
+                        f.get();
+
+                        proc0.addTasks(tasks);
+
+                        if (log.isDebugEnabled())
+                            log.debug("Sending task execution request to child process [jobId=" + job.id() +
+                                ", proc=" + proc0 + ", tasks=" + tasks + ']');
+
+                        sendExecutionRequest(proc0, job, tasks);
+                    }
+                    catch (IgniteCheckedException e) {
+                        notifyTasksFailed(tasks, FAILED, e);
+                    }
+                    finally {
+                        busyLock.readUnlock();
+                    }
+                }
+            });
+        }
+        finally {
+            busyLock.readUnlock();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void cancelTasks(HadoopJobId jobId) {
+        HadoopProcess proc = runningProcsByJobId.get(jobId);
+
+        if (proc != null)
+            proc.terminate();
+    }
+
+    /**
+     * Sends execution request to remote node.
+     *
+     * @param proc Process to send request to.
+     * @param job Job instance.
+     * @param tasks Collection of tasks to execute in started process.
+     */
+    private void sendExecutionRequest(HadoopProcess proc, HadoopJob job, Collection<HadoopTaskInfo> tasks)
+        throws IgniteCheckedException {
+        // Must synchronize: the process may crash concurrently, in which case onConnectionLost() is invoked.
+        proc.lock();
+
+        try {
+            if (proc.terminated()) {
+                notifyTasksFailed(tasks, CRASHED, null);
+
+                return;
+            }
+
+            HadoopTaskExecutionRequest req = new HadoopTaskExecutionRequest();
+
+            req.jobId(job.id());
+            req.jobInfo(job.info());
+            req.tasks(tasks);
+
+            comm.sendMessage(proc.descriptor(), req);
+        }
+        finally {
+            proc.unlock();
+        }
+    }
+
+    /**
+     * @return External task metadata.
+     */
+    private HadoopExternalTaskMetadata buildTaskMeta() {
+        HadoopExternalTaskMetadata meta = new HadoopExternalTaskMetadata();
+
+        meta.classpath(Arrays.asList(System.getProperty("java.class.path").split(File.pathSeparator)));
+        meta.jvmOptions(Arrays.asList("-Xmx1g", "-ea", "-XX:+UseConcMarkSweepGC", "-XX:+CMSClassUnloadingEnabled",
+            "-DIGNITE_HOME=" + U.getIgniteHome()));
+
+        return meta;
+    }
+
+    /**
+     * @param tasks Tasks to notify about.
+     * @param state Fail state.
+     * @param e Optional error.
+     */
+    private void notifyTasksFailed(Iterable<HadoopTaskInfo> tasks, HadoopTaskState state, Throwable e) {
+        HadoopTaskStatus fail = new HadoopTaskStatus(state, e);
+
+        for (HadoopTaskInfo task : tasks)
+            jobTracker.onTaskFinished(task, fail);
+    }
+
+    /**
+     * Starts a process that will be ready to execute Hadoop tasks.
+     *
+     * @param job Job instance.
+     * @param plan Map reduce plan.
+     * @return Started process.
+     */
+    private HadoopProcess startProcess(final HadoopJob job, final HadoopMapReducePlan plan) {
+        final UUID childProcId = UUID.randomUUID();
+
+        HadoopJobId jobId = job.id();
+
+        final HadoopProcessFuture fut = new HadoopProcessFuture(childProcId, jobId);
+
+        final HadoopProcess proc = new HadoopProcess(jobId, fut, plan.reducers(ctx.localNodeId()));
+
+        HadoopProcess old = runningProcsByJobId.put(jobId, proc);
+
+        assert old == null;
+
+        old = runningProcsByProcId.put(childProcId, proc);
+
+        assert old == null;
+
+        ctx.kernalContext().closure().runLocalSafe(new Runnable() {
+            @Override public void run() {
+                if (!busyLock.tryReadLock()) {
+                    fut.onDone(new IgniteCheckedException("Failed to start external process (grid is stopping)."));
+
+                    return;
+                }
+
+                try {
+                    HadoopExternalTaskMetadata startMeta = buildTaskMeta();
+
+                    if (log.isDebugEnabled())
+                        log.debug("Created hadoop child process metadata for job [job=" + job +
+                            ", childProcId=" + childProcId + ", taskMeta=" + startMeta + ']');
+
+                    Process proc = startJavaProcess(childProcId, startMeta, job);
+
+                    BufferedReader rdr = new BufferedReader(new InputStreamReader(proc.getInputStream()));
+
+                    String line;
+
+                    // Read the process output until a start or failure marker is seen.
+                    while ((line = rdr.readLine()) != null) {
+                        if (log.isDebugEnabled())
+                            log.debug("Tracing process output: " + line);
+
+                        if ("Started".equals(line)) {
+                            // Process started successfully, it should not write anything more to the output stream.
+                            if (log.isDebugEnabled())
+                                log.debug("Successfully started child process [childProcId=" + childProcId +
+                                    ", meta=" + job + ']');
+
+                            fut.onProcessStarted(proc);
+
+                            break;
+                        }
+                        else if ("Failed".equals(line)) {
+                            StringBuilder sb = new StringBuilder("Failed to start child process: " + job + "\n");
+
+                            while ((line = rdr.readLine()) != null)
+                                sb.append("    ").append(line).append("\n");
+
+                            // Cut the trailing newline character.
+                            sb.setLength(sb.length() - 1);
+
+                            log.warning(sb.toString());
+
+                            fut.onDone(new IgniteCheckedException(sb.toString()));
+
+                            break;
+                        }
+                    }
+                }
+                catch (Throwable e) {
+                    fut.onDone(new IgniteCheckedException("Failed to initialize child process: " + job, e));
+
+                    if (e instanceof Error)
+                        throw (Error)e;
+                }
+                finally {
+                    busyLock.readUnlock();
+                }
+            }
+        }, true);
+
+        fut.listen(new CI1<IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>>>() {
+            @Override public void apply(IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>> f) {
+                try {
+                    // Make sure there were no exceptions.
+                    f.get();
+
+                    prepareForJob(proc, job, plan);
+                }
+                catch (IgniteCheckedException ignore) {
+                    // Exception is printed in future's onDone() method.
+                }
+            }
+        });
+
+        return proc;
+    }
+
+    /**
+     * Checks that the local java command is available.
+     *
+     * @throws IgniteCheckedException If initialization failed.
+     */
+    private void initJavaCommand() throws IgniteCheckedException {
+        String javaHome = System.getProperty("java.home");
+
+        if (javaHome == null)
+            javaHome = System.getenv("JAVA_HOME");
+
+        if (javaHome == null)
+            throw new IgniteCheckedException("Failed to locate JAVA_HOME.");
+
+        javaCmd = javaHome + File.separator + "bin" + File.separator + (U.isWindows() ? "java.exe" : "java");
+
+        try {
+            Process proc = new ProcessBuilder(javaCmd, "-version").redirectErrorStream(true).start();
+
+            Collection<String> out = readProcessOutput(proc);
+
+            int res = proc.waitFor();
+
+            if (res != 0)
+                throw new IgniteCheckedException("Failed to execute 'java -version' command (process finished with nonzero " +
+                    "code) [exitCode=" + res + ", javaCmd='" + javaCmd + "', msg=" + F.first(out) + ']');
+
+            if (log.isInfoEnabled()) {
+                log.info("Will use java for external task execution: ");
+
+                for (String s : out)
+                    log.info("    " + s);
+            }
+        }
+        catch (IOException e) {
+            throw new IgniteCheckedException("Failed to check java for external task execution.", e);
+        }
+        catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+
+            throw new IgniteCheckedException("Failed to wait for process completion (thread got interrupted).", e);
+        }
+    }
+
+    /**
+     * Reads process output line-by-line.
+     *
+     * @param proc Process to read output from.
+     * @return Read lines.
+     * @throws IOException If read failed.
+     */
+    private Collection<String> readProcessOutput(Process proc) throws IOException {
+        BufferedReader rdr = new BufferedReader(new InputStreamReader(proc.getInputStream()));
+
+        Collection<String> res = new ArrayList<>();
+
+        String s;
+
+        while ((s = rdr.readLine()) != null)
+            res.add(s);
+
+        return res;
+    }
+
+    /**
+     * Builds and starts a java process from metadata.
+     *
+     * @param childProcId Child process ID.
+     * @param startMeta Metadata.
+     * @param job Job.
+     * @return Started process.
+     */
+    private Process startJavaProcess(UUID childProcId, HadoopExternalTaskMetadata startMeta,
+        HadoopJob job) throws Exception {
+        String outFldr = jobWorkFolder(job.id()) + File.separator + childProcId;
+
+        if (log.isDebugEnabled())
+            log.debug("Will write process log output to: " + outFldr);
+
+        List<String> cmd = new ArrayList<>();
+
+        File workDir = U.resolveWorkDirectory("", false);
+
+        cmd.add(javaCmd);
+        cmd.addAll(startMeta.jvmOptions());
+        cmd.add("-cp");
+        cmd.add(buildClasspath(startMeta.classpath()));
+        cmd.add(HadoopExternalProcessStarter.class.getName());
+        cmd.add("-cpid");
+        cmd.add(String.valueOf(childProcId));
+        cmd.add("-ppid");
+        cmd.add(String.valueOf(nodeDesc.processId()));
+        cmd.add("-nid");
+        cmd.add(String.valueOf(nodeDesc.parentNodeId()));
+        cmd.add("-addr");
+        cmd.add(nodeDesc.address());
+        cmd.add("-tport");
+        cmd.add(String.valueOf(nodeDesc.tcpPort()));
+        cmd.add("-sport");
+        cmd.add(String.valueOf(nodeDesc.sharedMemoryPort()));
+        cmd.add("-out");
+        cmd.add(outFldr);
+        cmd.add("-wd");
+        cmd.add(workDir.getAbsolutePath());
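+
+        // Illustrative only (placeholder values, not part of the original patch):
+        // the assembled command resembles
+        //     <java.home>/bin/java -Xmx1g -ea -XX:+UseConcMarkSweepGC ... -cp <classpath>
+        //         HadoopExternalProcessStarter -cpid <childProcId> -ppid <parentProcId>
+        //         -nid <parentNodeId> -addr <host> -tport <tcpPort> -sport <shmemPort>
+        //         -out <outFldr> -wd <workDir>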
+
+        return new ProcessBuilder(cmd)
+            .redirectErrorStream(true)
+            .directory(workDir)
+            .start();
+    }
+
+    /**
+     * Gets job work folder.
+     *
+     * @param jobId Job ID.
+     * @return Job work folder.
+     */
+    private String jobWorkFolder(HadoopJobId jobId) {
+        return outputBase + File.separator + "Job_" + jobId;
+    }
+
+    /**
+     * @param cp Classpath collection.
+     * @return Classpath string.
+     */
+    private String buildClasspath(Collection<String> cp) {
+        assert !cp.isEmpty();
+
+        StringBuilder sb = new StringBuilder();
+
+        for (String s : cp)
+            sb.append(s).append(pathSep);
+
+        sb.setLength(sb.length() - 1);
+
+        return sb.toString();
+    }
+
+    /**
+     * Sends job info update request to remote process.
+     *
+     * @param proc Process to send request to.
+     * @param meta Job metadata.
+     */
+    private void sendJobInfoUpdate(HadoopProcess proc, HadoopJobMetadata meta) {
+        Map<Integer, HadoopProcessDescriptor> rdcAddrs = meta.reducersAddresses();
+
+        int rdcNum = meta.mapReducePlan().reducers();
+
+        HadoopProcessDescriptor[] addrs = null;
+
+        if (rdcAddrs != null && rdcAddrs.size() == rdcNum) {
+            addrs = new HadoopProcessDescriptor[rdcNum];
+
+            for (int i = 0; i < rdcNum; i++) {
+                HadoopProcessDescriptor desc = rdcAddrs.get(i);
+
+                assert desc != null : "Missing reducing address [meta=" + meta + ", rdc=" + i + ']';
+
+                addrs[i] = desc;
+            }
+        }
+
+        try {
+            comm.sendMessage(proc.descriptor(), new HadoopJobInfoUpdateRequest(proc.jobId, meta.phase(), addrs));
+        }
+        catch (IgniteCheckedException e) {
+            if (!proc.terminated()) {
+                log.error("Failed to send job state update message to remote child process (will kill the process) " +
+                    "[jobId=" + proc.jobId + ", meta=" + meta + ']', e);
+
+                proc.terminate();
+            }
+        }
+    }
+
+    /**
+     * Sends prepare request to remote process.
+     *
+     * @param proc Process to send request to.
+     * @param job Job.
+     * @param plan Map reduce plan.
+     */
+    private void prepareForJob(HadoopProcess proc, HadoopJob job, HadoopMapReducePlan plan) {
+        try {
+            comm.sendMessage(proc.descriptor(), new HadoopPrepareForJobRequest(job.id(), job.info(),
+                plan.reducers(), plan.reducers(ctx.localNodeId())));
+        }
+        catch (IgniteCheckedException e) {
+            U.error(log, "Failed to send job prepare request to remote process [proc=" + proc + ", job=" + job +
+                ", plan=" + plan + ']', e);
+
+            proc.terminate();
+        }
+    }
+
+    /**
+     * Processes task finished message.
+     *
+     * @param desc Remote process descriptor.
+     * @param taskMsg Task finished message.
+     */
+    private void processTaskFinishedMessage(HadoopProcessDescriptor desc, HadoopTaskFinishedMessage taskMsg) {
+        HadoopProcess proc = runningProcsByProcId.get(desc.processId());
+
+        if (proc != null)
+            proc.removeTask(taskMsg.taskInfo());
+
+        jobTracker.onTaskFinished(taskMsg.taskInfo(), taskMsg.status());
+    }
+
+    /**
+     * Listener for messages coming from child task processes.
+     */
+    private class MessageListener implements HadoopMessageListener {
+        /** {@inheritDoc} */
+        @Override public void onMessageReceived(HadoopProcessDescriptor desc, HadoopMessage msg) {
+            if (!busyLock.tryReadLock())
+                return;
+
+            try {
+                if (msg instanceof HadoopProcessStartedAck) {
+                    HadoopProcess proc = runningProcsByProcId.get(desc.processId());
+
+                    assert proc != null : "Missing child process for processId: " + desc;
+
+                    HadoopProcessFuture fut = proc.initFut;
+
+                    if (fut != null)
+                        fut.onReplyReceived(desc);
+                    else
+                        // Safety: the future is expected to exist for every registered process.
+                        log.warning("Failed to find process start future (will ignore): " + desc);
+                }
+                else if (msg instanceof HadoopTaskFinishedMessage) {
+                    HadoopTaskFinishedMessage taskMsg = (HadoopTaskFinishedMessage)msg;
+
+                    processTaskFinishedMessage(desc, taskMsg);
+                }
+                else
+                    log.warning("Unexpected message received by node [desc=" + desc + ", msg=" + msg + ']');
+            }
+            finally {
+                busyLock.readUnlock();
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onConnectionLost(HadoopProcessDescriptor desc) {
+            if (!busyLock.tryReadLock())
+                return;
+
+            try {
+                if (desc == null) {
+                    U.warn(log, "Handshake failed.");
+
+                    return;
+                }
+
+                // Notify job tracker about failed tasks.
+                HadoopProcess proc = runningProcsByProcId.get(desc.processId());
+
+                if (proc != null) {
+                    Collection<HadoopTaskInfo> tasks = proc.tasks();
+
+                    if (!F.isEmpty(tasks)) {
+                        log.warning("Lost connection with alive process (will terminate): " + desc);
+
+                        HadoopTaskStatus status = new HadoopTaskStatus(CRASHED,
+                            new IgniteCheckedException("Failed to run tasks (external process finished unexpectedly): " + desc));
+
+                        for (HadoopTaskInfo info : tasks)
+                            jobTracker.onTaskFinished(info, status);
+
+                        runningProcsByJobId.remove(proc.jobId(), proc);
+                    }
+
+                    // Safety.
+                    proc.terminate();
+                }
+            }
+            finally {
+                busyLock.readUnlock();
+            }
+        }
+    }
+
+    /**
+     * Hadoop process.
+     */
+    private static class HadoopProcess extends ReentrantLock {
+        /** */
+        private static final long serialVersionUID = 0L;
+
+        /** Job ID. */
+        private final HadoopJobId jobId;
+
+        /** Process. */
+        private Process proc;
+
+        /** Init future. Completes when process is ready to receive messages. */
+        private final HadoopProcessFuture initFut;
+
+        /** Process descriptor. */
+        private HadoopProcessDescriptor procDesc;
+
+        /** Reducers planned for this process. */
+        private Collection<Integer> reducers;
+
+        /** Tasks. */
+        private final Collection<HadoopTaskInfo> tasks = new ConcurrentLinkedDeque8<>();
+
+        /** Terminated flag. */
+        private volatile boolean terminated;
+
+        /**
+         * @param jobId Job ID.
+         * @param initFut Init future.
+         * @param reducers Reducers planned for this process.
+         */
+        private HadoopProcess(HadoopJobId jobId, HadoopProcessFuture initFut,
+            int[] reducers) {
+            this.jobId = jobId;
+            this.initFut = initFut;
+
+            if (!F.isEmpty(reducers)) {
+                this.reducers = new ArrayList<>(reducers.length);
+
+                for (int r : reducers)
+                    this.reducers.add(r);
+            }
+        }
+
+        /**
+         * @return Communication process descriptor.
+         */
+        private HadoopProcessDescriptor descriptor() {
+            return procDesc;
+        }
+
+        /**
+         * @return Job ID.
+         */
+        public HadoopJobId jobId() {
+            return jobId;
+        }
+
+        /**
+         * Initialization callback.
+         *
+         * @param proc Java process representation.
+         * @param procDesc Process descriptor.
+         */
+        private void onInitialized(Process proc, HadoopProcessDescriptor procDesc) {
+            this.proc = proc;
+            this.procDesc = procDesc;
+        }
+
+        /**
+         * Terminates process (kills it).
+         */
+        private void terminate() {
+            // Guard against concurrent message sending.
+            lock();
+
+            try {
+                terminated = true;
+
+                if (!initFut.isDone())
+                    initFut.listen(new CI1<IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>>>() {
+                        @Override public void apply(
+                            IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>> f) {
+                            proc.destroy();
+                        }
+                    });
+                else
+                    proc.destroy();
+            }
+            finally {
+                unlock();
+            }
+        }
+
+        /**
+         * @return Terminated flag.
+         */
+        private boolean terminated() {
+            return terminated;
+        }
+
+        /**
+         * Adds tasks to this process.
+         *
+         * @param tasks Tasks to add.
+         */
+        private void addTasks(Collection<HadoopTaskInfo> tasks) {
+            this.tasks.addAll(tasks);
+        }
+
+        /**
+         * Removes a task once it has completed.
+         *
+         * @param task Task to remove.
+         */
+        private void removeTask(HadoopTaskInfo task) {
+            tasks.remove(task);
+        }
+
+        /**
+         * @return Collection of tasks.
+         */
+        private Collection<HadoopTaskInfo> tasks() {
+            return tasks;
+        }
+
+        /**
+         * @return Planned reducers.
+         */
+        private Collection<Integer> reducers() {
+            return reducers;
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return S.toString(HadoopProcess.class, this);
+        }
+    }
+
+    /**
+     * Child process initialization future. Completes once the local JVM process has
+     * started and the handshake reply has been received from it.
+     */
+    private class HadoopProcessFuture extends GridFutureAdapter<IgniteBiTuple<Process, HadoopProcessDescriptor>> {
+        /** */
+        private static final long serialVersionUID = 0L;
+
+        /** Child process ID. */
+        private UUID childProcId;
+
+        /** Job ID. */
+        private HadoopJobId jobId;
+
+        /** Process descriptor. */
+        private HadoopProcessDescriptor desc;
+
+        /** Running process. */
+        private Process proc;
+
+        /** Process started flag. */
+        private volatile boolean procStarted;
+
+        /** Reply received flag. */
+        private volatile boolean replyReceived;
+
+        /** Logger. */
+        private final IgniteLogger log = HadoopExternalTaskExecutor.this.log;
+
+        /**
+         * @param childProcId Child process ID.
+         * @param jobId Job ID.
+         */
+        private HadoopProcessFuture(UUID childProcId, HadoopJobId jobId) {
+            this.childProcId = childProcId;
+            this.jobId = jobId;
+        }
+
+        /**
+         * Process started callback.
+         *
+         * @param proc Started process.
+         */
+        public void onProcessStarted(Process proc) {
+            this.proc = proc;
+
+            procStarted = true;
+
+            if (procStarted && replyReceived)
+                onDone(F.t(proc, desc));
+        }
+
+        /**
+         * Reply received callback.
+         *
+         * @param desc Remote process descriptor.
+         */
+        public void onReplyReceived(HadoopProcessDescriptor desc) {
+            assert childProcId.equals(desc.processId());
+
+            this.desc = desc;
+
+            replyReceived = true;
+
+            if (procStarted && replyReceived)
+                onDone(F.t(proc, desc));
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean onDone(@Nullable IgniteBiTuple<Process, HadoopProcessDescriptor> res,
+            @Nullable Throwable err) {
+            if (err == null) {
+                HadoopProcess proc = runningProcsByProcId.get(childProcId);
+
+                assert proc != null;
+
+                assert proc.initFut == this;
+
+                proc.onInitialized(res.get1(), res.get2());
+
+                if (!F.isEmpty(proc.reducers()))
+                    jobTracker.onExternalMappersInitialized(jobId, proc.reducers(), desc);
+            }
+            else {
+                // Clean up since init failed.
+                runningProcsByJobId.remove(jobId);
+                runningProcsByProcId.remove(childProcId);
+            }
+
+            if (super.onDone(res, err)) {
+                if (err == null) {
+                    if (log.isDebugEnabled())
+                        log.debug("Initialized child process for external task execution [jobId=" + jobId +
+                            ", desc=" + desc + ", initTime=" + duration() + ']');
+                }
+                else
+                    U.error(log, "Failed to initialize child process for external task execution [jobId=" + jobId +
+                        ", desc=" + desc + ']', err);
+
+                return true;
+            }
+
+            return false;
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskMetadata.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskMetadata.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskMetadata.java
new file mode 100644
index 0000000..27b0329
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskMetadata.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor.external;
+
+import java.util.Collection;
+import org.apache.ignite.internal.util.tostring.GridToStringInclude;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+/**
+ * External task metadata (classpath, JVM options) needed to start external process execution.
+ */
+public class HadoopExternalTaskMetadata {
+    /** Process classpath. */
+    private Collection<String> classpath;
+
+    /** JVM options. */
+    @GridToStringInclude
+    private Collection<String> jvmOpts;
+
+    /**
+     * @return JVM options.
+     */
+    public Collection<String> jvmOptions() {
+        return jvmOpts;
+    }
+
+    /**
+     * @param jvmOpts JVM options.
+     */
+    public void jvmOptions(Collection<String> jvmOpts) {
+        this.jvmOpts = jvmOpts;
+    }
+
+    /**
+     * @return Classpath.
+     */
+    public Collection<String> classpath() {
+        return classpath;
+    }
+
+    /**
+     * @param classpath Classpath.
+     */
+    public void classpath(Collection<String> classpath) {
+        this.classpath = classpath;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopExternalTaskMetadata.class, this);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopJobInfoUpdateRequest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopJobInfoUpdateRequest.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopJobInfoUpdateRequest.java
new file mode 100644
index 0000000..96b3675
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopJobInfoUpdateRequest.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor.external;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobPhase;
+import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
+import org.apache.ignite.internal.util.tostring.GridToStringInclude;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+/**
+ * Job info update request.
+ */
+public class HadoopJobInfoUpdateRequest implements HadoopMessage {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** Job ID. */
+    @GridToStringInclude
+    private HadoopJobId jobId;
+
+    /** Job phase. */
+    @GridToStringInclude
+    private HadoopJobPhase jobPhase;
+
+    /** Reducers addresses. */
+    @GridToStringInclude
+    private HadoopProcessDescriptor[] reducersAddrs;
+
+    /**
+     * Constructor required by {@link Externalizable}.
+     */
+    public HadoopJobInfoUpdateRequest() {
+        // No-op.
+    }
+
+    /**
+     * @param jobId Job ID.
+     * @param jobPhase Job phase.
+     * @param reducersAddrs Reducers addresses.
+     */
+    public HadoopJobInfoUpdateRequest(HadoopJobId jobId, HadoopJobPhase jobPhase,
+        HadoopProcessDescriptor[] reducersAddrs) {
+        assert jobId != null;
+
+        this.jobId = jobId;
+        this.jobPhase = jobPhase;
+        this.reducersAddrs = reducersAddrs;
+    }
+
+    /**
+     * @return Job ID.
+     */
+    public HadoopJobId jobId() {
+        return jobId;
+    }
+
+    /**
+     * @return Job phase.
+     */
+    public HadoopJobPhase jobPhase() {
+        return jobPhase;
+    }
+
+    /**
+     * @return Reducers addresses.
+     */
+    public HadoopProcessDescriptor[] reducersAddresses() {
+        return reducersAddrs;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeExternal(ObjectOutput out) throws IOException {
+        jobId.writeExternal(out);
+
+        out.writeObject(jobPhase);
+        U.writeArray(out, reducersAddrs);
+    }
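+
+    // Note: readExternal() below restores fields in exactly the order written above;
+    // Externalizable carries no field names, so the two methods must stay in sync.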
+
+    /** {@inheritDoc} */
+    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        jobId = new HadoopJobId();
+        jobId.readExternal(in);
+
+        jobPhase = (HadoopJobPhase)in.readObject();
+        reducersAddrs = (HadoopProcessDescriptor[])U.readArray(in);
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopJobInfoUpdateRequest.class, this);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopPrepareForJobRequest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopPrepareForJobRequest.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopPrepareForJobRequest.java
new file mode 100644
index 0000000..43bdc36
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopPrepareForJobRequest.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor.external;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
+import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
+import org.apache.ignite.internal.util.tostring.GridToStringInclude;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+/**
+ * Child process initialization request.
+ */
+public class HadoopPrepareForJobRequest implements HadoopMessage {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** Job ID. */
+    @GridToStringInclude
+    private HadoopJobId jobId;
+
+    /** Job info. */
+    @GridToStringInclude
+    private HadoopJobInfo jobInfo;
+
+    /** Total number of reducers in the job. */
+    @GridToStringInclude
+    private int totalReducersCnt;
+
+    /** Reducers to be executed on current node. */
+    @GridToStringInclude
+    private int[] locReducers;
+
+    /**
+     * Constructor required by {@link Externalizable}.
+     */
+    public HadoopPrepareForJobRequest() {
+        // No-op.
+    }
+
+    /**
+     * @param jobId Job ID.
+     * @param jobInfo Job info.
+     * @param totalReducersCnt Number of reducers in the job.
+     * @param locReducers Reducers to be executed on current node.
+     */
+    public HadoopPrepareForJobRequest(HadoopJobId jobId, HadoopJobInfo jobInfo, int totalReducersCnt,
+        int[] locReducers) {
+        assert jobId != null;
+
+        this.jobId = jobId;
+        this.jobInfo = jobInfo;
+        this.totalReducersCnt = totalReducersCnt;
+        this.locReducers = locReducers;
+    }
+
+    /**
+     * @return Job info.
+     */
+    public HadoopJobInfo jobInfo() {
+        return jobInfo;
+    }
+
+    /**
+     * @return Job ID.
+     */
+    public HadoopJobId jobId() {
+        return jobId;
+    }
+
+    /**
+     * @return Reducers to be executed on current node.
+     */
+    public int[] localReducers() {
+        return locReducers;
+    }
+
+    /**
+     * @return Number of reducers in the job.
+     */
+    public int totalReducerCount() {
+        return totalReducersCnt;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeExternal(ObjectOutput out) throws IOException {
+        jobId.writeExternal(out);
+
+        out.writeObject(jobInfo);
+        out.writeInt(totalReducersCnt);
+
+        U.writeIntArray(out, locReducers);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        jobId = new HadoopJobId();
+        jobId.readExternal(in);
+
+        jobInfo = (HadoopJobInfo)in.readObject();
+        totalReducersCnt = in.readInt();
+
+        locReducers = U.readIntArray(in);
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopPrepareForJobRequest.class, this);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessDescriptor.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessDescriptor.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessDescriptor.java
new file mode 100644
index 0000000..2dc233b
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessDescriptor.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor.external;
+
+import java.io.Serializable;
+import java.util.UUID;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+/**
+ * Process descriptor used to identify the process in which a task is running.
+ */
+public class HadoopProcessDescriptor implements Serializable {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** Parent node ID. */
+    private UUID parentNodeId;
+
+    /** Process ID. */
+    private UUID procId;
+
+    /** Address. */
+    private String addr;
+
+    /** TCP port. */
+    private int tcpPort;
+
+    /** Shared memory port. */
+    private int shmemPort;
+
+    /**
+     * @param parentNodeId Parent node ID.
+     * @param procId Process ID.
+     */
+    public HadoopProcessDescriptor(UUID parentNodeId, UUID procId) {
+        this.parentNodeId = parentNodeId;
+        this.procId = procId;
+    }
+
+    /**
+     * Gets process ID.
+     *
+     * @return Process ID.
+     */
+    public UUID processId() {
+        return procId;
+    }
+
+    /**
+     * Gets parent node ID.
+     *
+     * @return Parent node ID.
+     */
+    public UUID parentNodeId() {
+        return parentNodeId;
+    }
+
+    /**
+     * Gets host address.
+     *
+     * @return Host address.
+     */
+    public String address() {
+        return addr;
+    }
+
+    /**
+     * Sets host address.
+     *
+     * @param addr Host address.
+     */
+    public void address(String addr) {
+        this.addr = addr;
+    }
+
+    /**
+     * @return Shared memory port.
+     */
+    public int sharedMemoryPort() {
+        return shmemPort;
+    }
+
+    /**
+     * Sets shared memory port.
+     *
+     * @param shmemPort Shared memory port.
+     */
+    public void sharedMemoryPort(int shmemPort) {
+        this.shmemPort = shmemPort;
+    }
+
+    /**
+     * @return TCP port.
+     */
+    public int tcpPort() {
+        return tcpPort;
+    }
+
+    /**
+     * Sets TCP port.
+     *
+     * @param tcpPort TCP port.
+     */
+    public void tcpPort(int tcpPort) {
+        this.tcpPort = tcpPort;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object o) {
+        if (this == o)
+            return true;
+
+        if (!(o instanceof HadoopProcessDescriptor))
+            return false;
+
+        HadoopProcessDescriptor that = (HadoopProcessDescriptor)o;
+
+        return parentNodeId.equals(that.parentNodeId) && procId.equals(that.procId);
+    }
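+
+    // Equality is based on (parentNodeId, procId) only; the mutable address and port
+    // fields are excluded, so a descriptor keeps its identity after its endpoint
+    // details are filled in.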
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        int result = parentNodeId.hashCode();
+
+        result = 31 * result + procId.hashCode();
+
+        return result;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopProcessDescriptor.class, this);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessStartedAck.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessStartedAck.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessStartedAck.java
new file mode 100644
index 0000000..b35f3ec
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessStartedAck.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor.external;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+/**
+ * Process started message.
+ */
+public class HadoopProcessStartedAck implements HadoopMessage {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** {@inheritDoc} */
+    @Override public void writeExternal(ObjectOutput out) throws IOException {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopProcessStartedAck.class, this);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskExecutionRequest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskExecutionRequest.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskExecutionRequest.java
new file mode 100644
index 0000000..3875304
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskExecutionRequest.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor.external;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.Collection;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
+import org.apache.ignite.internal.util.tostring.GridToStringInclude;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+/**
+ * Message sent from node to child process to start task(s) execution.
+ */
+public class HadoopTaskExecutionRequest implements HadoopMessage {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** Job ID. */
+    @GridToStringInclude
+    private HadoopJobId jobId;
+
+    /** Job info. */
+    @GridToStringInclude
+    private HadoopJobInfo jobInfo;
+
+    /** Tasks to execute. */
+    @GridToStringInclude
+    private Collection<HadoopTaskInfo> tasks;
+
+    /**
+     * @return Job ID.
+     */
+    public HadoopJobId jobId() {
+        return jobId;
+    }
+
+    /**
+     * @param jobId Job ID.
+     */
+    public void jobId(HadoopJobId jobId) {
+        this.jobId = jobId;
+    }
+
+    /**
+     * @return Job info.
+     */
+    public HadoopJobInfo jobInfo() {
+        return jobInfo;
+    }
+
+    /**
+     * @param jobInfo Job info.
+     */
+    public void jobInfo(HadoopJobInfo jobInfo) {
+        this.jobInfo = jobInfo;
+    }
+
+    /**
+     * @return Tasks.
+     */
+    public Collection<HadoopTaskInfo> tasks() {
+        return tasks;
+    }
+
+    /**
+     * @param tasks Tasks.
+     */
+    public void tasks(Collection<HadoopTaskInfo> tasks) {
+        this.tasks = tasks;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopTaskExecutionRequest.class, this);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeExternal(ObjectOutput out) throws IOException {
+        jobId.writeExternal(out);
+
+        out.writeObject(jobInfo);
+        U.writeCollection(out, tasks);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        jobId = new HadoopJobId();
+        jobId.readExternal(in);
+
+        jobInfo = (HadoopJobInfo)in.readObject();
+        tasks = U.readCollection(in);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskFinishedMessage.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskFinishedMessage.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskFinishedMessage.java
new file mode 100644
index 0000000..9e1fdb3
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskFinishedMessage.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor.external;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskStatus;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+/**
+ * Task finished message. Sent when local task finishes execution.
+ */
+public class HadoopTaskFinishedMessage implements HadoopMessage {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** Finished task info. */
+    private HadoopTaskInfo taskInfo;
+
+    /** Task finish status. */
+    private HadoopTaskStatus status;
+
+    /**
+     * Constructor required by {@link Externalizable}.
+     */
+    public HadoopTaskFinishedMessage() {
+        // No-op.
+    }
+
+    /**
+     * @param taskInfo Finished task info.
+     * @param status Task finish status.
+     */
+    public HadoopTaskFinishedMessage(HadoopTaskInfo taskInfo, HadoopTaskStatus status) {
+        assert taskInfo != null;
+        assert status != null;
+
+        this.taskInfo = taskInfo;
+        this.status = status;
+    }
+
+    /**
+     * @return Finished task info.
+     */
+    public HadoopTaskInfo taskInfo() {
+        return taskInfo;
+    }
+
+    /**
+     * @return Task finish status.
+     */
+    public HadoopTaskStatus status() {
+        return status;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopTaskFinishedMessage.class, this);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeExternal(ObjectOutput out) throws IOException {
+        taskInfo.writeExternal(out);
+        status.writeExternal(out);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        taskInfo = new HadoopTaskInfo();
+        taskInfo.readExternal(in);
+
+        status = new HadoopTaskStatus();
+        status.readExternal(in);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopChildProcessRunner.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopChildProcessRunner.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopChildProcessRunner.java
new file mode 100644
index 0000000..4a946e9
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopChildProcessRunner.java
@@ -0,0 +1,459 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor.external.child;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.hadoop.HadoopJob;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskOutput;
+import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
+import org.apache.ignite.internal.processors.hadoop.shuffle.HadoopShuffleAck;
+import org.apache.ignite.internal.processors.hadoop.shuffle.HadoopShuffleJob;
+import org.apache.ignite.internal.processors.hadoop.shuffle.HadoopShuffleMessage;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopExecutorService;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopRunnableTask;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskState;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskStatus;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopJobInfoUpdateRequest;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopPrepareForJobRequest;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessStartedAck;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopTaskExecutionRequest;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopTaskFinishedMessage;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication.HadoopExternalCommunication;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication.HadoopMessageListener;
+import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2Job;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.apache.ignite.internal.util.lang.IgniteInClosure2X;
+import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
+import org.apache.ignite.internal.util.typedef.CI1;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.MAP;
+import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.REDUCE;
+
+/**
+ * Runner that executes Hadoop tasks inside an external (child) process.
+ */
+@SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
+public class HadoopChildProcessRunner {
+    /** Node process descriptor. */
+    private HadoopProcessDescriptor nodeDesc;
+
+    /** Message processing executor service. */
+    private ExecutorService msgExecSvc;
+
+    /** Task executor service. */
+    private HadoopExecutorService execSvc;
+
+    /** Unsafe memory. */
+    protected GridUnsafeMemory mem = new GridUnsafeMemory(0);
+
+    /** External communication. */
+    private HadoopExternalCommunication comm;
+
+    /** Logger. */
+    private IgniteLogger log;
+
+    /** Init guard. */
+    private final AtomicBoolean initGuard = new AtomicBoolean();
+
+    /** Start time. */
+    private long startTime;
+
+    /** Init future. */
+    private final GridFutureAdapter<?> initFut = new GridFutureAdapter<>();
+
+    /** Job instance. */
+    private HadoopJob job;
+
+    /** Number of uncompleted tasks. */
+    private final AtomicInteger pendingTasks = new AtomicInteger();
+
+    /** Shuffle job. */
+    private HadoopShuffleJob<HadoopProcessDescriptor> shuffleJob;
+
+    /** Concurrent mappers. */
+    private int concMappers;
+
+    /** Concurrent reducers. */
+    private int concReducers;
+
+    /**
+     * Starts child process runner.
+     *
+     * @param comm External communication.
+     * @param nodeDesc Parent node process descriptor.
+     * @param msgExecSvc Message processing executor service.
+     * @param parentLog Parent logger.
+     * @throws IgniteCheckedException If failed.
+     */
+    public void start(HadoopExternalCommunication comm, HadoopProcessDescriptor nodeDesc,
+        ExecutorService msgExecSvc, IgniteLogger parentLog)
+        throws IgniteCheckedException {
+        this.comm = comm;
+        this.nodeDesc = nodeDesc;
+        this.msgExecSvc = msgExecSvc;
+
+        comm.setListener(new MessageListener());
+        log = parentLog.getLogger(HadoopChildProcessRunner.class);
+
+        startTime = U.currentTimeMillis();
+
+        // Notify the parent node that this process has started.
+        comm.sendMessage(this.nodeDesc, new HadoopProcessStartedAck());
+    }
+
+    /**
+     * Initializes process for task execution.
+     *
+     * @param req Initialization request.
+     */
+    private void prepareProcess(HadoopPrepareForJobRequest req) {
+        if (initGuard.compareAndSet(false, true)) {
+            try {
+                if (log.isDebugEnabled())
+                    log.debug("Initializing external hadoop task: " + req);
+
+                assert job == null;
+
+                job = req.jobInfo().createJob(HadoopV2Job.class, req.jobId(), log, null);
+
+                job.initialize(true, nodeDesc.processId());
+
+                shuffleJob = new HadoopShuffleJob<>(comm.localProcessDescriptor(), log, job, mem,
+                    req.totalReducerCount(), req.localReducers());
+
+                initializeExecutors(req);
+
+                if (log.isDebugEnabled())
+                    log.debug("External process initialized [initWaitTime=" +
+                        (U.currentTimeMillis() - startTime) + ']');
+
+                initFut.onDone();
+            }
+            catch (IgniteCheckedException e) {
+                U.error(log, "Failed to initialize process: " + req, e);
+
+                initFut.onDone(e);
+            }
+        }
+        else
+            log.warning("Duplicate initialize process request received (will ignore): " + req);
+    }
+
+    /**
+     * @param req Task execution request.
+     */
+    private void runTasks(final HadoopTaskExecutionRequest req) {
+        if (!initFut.isDone() && log.isDebugEnabled())
+            log.debug("Will wait for process initialization future completion: " + req);
+
+        initFut.listen(new CI1<IgniteInternalFuture<?>>() {
+            @Override public void apply(IgniteInternalFuture<?> f) {
+                try {
+                    // Make sure init was successful.
+                    f.get();
+
+                    boolean set = pendingTasks.compareAndSet(0, req.tasks().size());
+
+                    assert set;
+
+                    HadoopTaskInfo info = F.first(req.tasks());
+
+                    assert info != null;
+
+                    int size = info.type() == MAP ? concMappers : concReducers;
+
+//                    execSvc.setCorePoolSize(size);
+//                    execSvc.setMaximumPoolSize(size);
+
+                    if (log.isDebugEnabled())
+                        log.debug("Set executor service size for task type [type=" + info.type() +
+                            ", size=" + size + ']');
+
+                    for (HadoopTaskInfo taskInfo : req.tasks()) {
+                        if (log.isDebugEnabled())
+                            log.debug("Submitted task for external execution: " + taskInfo);
+
+                        execSvc.submit(new HadoopRunnableTask(log, job, mem, taskInfo, nodeDesc.parentNodeId()) {
+                            @Override protected void onTaskFinished(HadoopTaskStatus status) {
+                                onTaskFinished0(this, status);
+                            }
+
+                            @Override protected HadoopTaskInput createInput(HadoopTaskContext ctx)
+                                throws IgniteCheckedException {
+                                return shuffleJob.input(ctx);
+                            }
+
+                            @Override protected HadoopTaskOutput createOutput(HadoopTaskContext ctx)
+                                throws IgniteCheckedException {
+                                return shuffleJob.output(ctx);
+                            }
+                        });
+                    }
+                }
+                catch (IgniteCheckedException e) {
+                    for (HadoopTaskInfo info : req.tasks())
+                        notifyTaskFinished(info, new HadoopTaskStatus(HadoopTaskState.FAILED, e), false);
+                }
+            }
+        });
+    }
+
+    /**
+     * Creates executor services.
+     *
+     * @param req Init child process request.
+     */
+    private void initializeExecutors(HadoopPrepareForJobRequest req) {
+        int cpus = Runtime.getRuntime().availableProcessors();
+//
+//        concMappers = get(req.jobInfo(), EXTERNAL_CONCURRENT_MAPPERS, cpus);
+//        concReducers = get(req.jobInfo(), EXTERNAL_CONCURRENT_REDUCERS, cpus);
+
+        execSvc = new HadoopExecutorService(log, "", cpus * 2, 1024);
+    }
+
+    /**
+     * Updates external process map so that shuffle can proceed with sending messages to reducers.
+     *
+     * @param req Update request.
+     */
+    private void updateTasks(final HadoopJobInfoUpdateRequest req) {
+        initFut.listen(new CI1<IgniteInternalFuture<?>>() {
+            @Override public void apply(IgniteInternalFuture<?> gridFut) {
+                assert initGuard.get();
+
+                assert req.jobId().equals(job.id());
+
+                if (req.reducersAddresses() != null) {
+                    if (shuffleJob.initializeReduceAddresses(req.reducersAddresses())) {
+                        shuffleJob.startSending("external",
+                            new IgniteInClosure2X<HadoopProcessDescriptor, HadoopShuffleMessage>() {
+                                @Override public void applyx(HadoopProcessDescriptor dest,
+                                    HadoopShuffleMessage msg) throws IgniteCheckedException {
+                                    comm.sendMessage(dest, msg);
+                                }
+                            });
+                    }
+                }
+            }
+        });
+    }
+
+    /**
+     * Stops all executors and running tasks.
+     */
+    private void shutdown() {
+        if (execSvc != null)
+            execSvc.shutdown(5000);
+
+        if (msgExecSvc != null)
+            msgExecSvc.shutdownNow();
+
+        try {
+            job.dispose(true);
+        }
+        catch (IgniteCheckedException e) {
+            U.error(log, "Failed to dispose job.", e);
+        }
+    }
+
+    /**
+     * Notifies node about task finish.
+     *
+     * @param run Finished task runnable.
+     * @param status Task status.
+     */
+    private void onTaskFinished0(HadoopRunnableTask run, HadoopTaskStatus status) {
+        HadoopTaskInfo info = run.taskInfo();
+
+        int pendingTasks0 = pendingTasks.decrementAndGet();
+
+        if (log.isDebugEnabled())
+            log.debug("Hadoop task execution finished [info=" + info
+                + ", state=" + status.state() + ", waitTime=" + run.waitTime() + ", execTime=" + run.executionTime() +
+                ", pendingTasks=" + pendingTasks0 +
+                ", err=" + status.failCause() + ']');
+
+        assert info.type() == MAP || info.type() == REDUCE : "Only MAP or REDUCE tasks are supported.";
+
+        boolean flush = pendingTasks0 == 0 && info.type() == MAP;
+
+        notifyTaskFinished(info, status, flush);
+    }
+
+    /**
+     * @param taskInfo Finished task info.
+     * @param status Task status.
+     * @param flush Whether to flush shuffle messages before sending the notification.
+     */
+    private void notifyTaskFinished(final HadoopTaskInfo taskInfo, final HadoopTaskStatus status,
+        boolean flush) {
+
+        final HadoopTaskState state = status.state();
+        final Throwable err = status.failCause();
+
+        if (!flush) {
+            try {
+                if (log.isDebugEnabled())
+                    log.debug("Sending notification to parent node [taskInfo=" + taskInfo + ", state=" + state +
+                        ", err=" + err + ']');
+
+                comm.sendMessage(nodeDesc, new HadoopTaskFinishedMessage(taskInfo, status));
+            }
+            catch (IgniteCheckedException e) {
+                log.error("Failed to send message to parent node (will terminate child process).", e);
+
+                shutdown();
+
+                terminate();
+            }
+        }
+        else {
+            if (log.isDebugEnabled())
+                log.debug("Flushing shuffle messages before sending last task completion notification [taskInfo=" +
+                    taskInfo + ", state=" + state + ", err=" + err + ']');
+
+            final long start = U.currentTimeMillis();
+
+            try {
+                shuffleJob.flush().listen(new CI1<IgniteInternalFuture<?>>() {
+                    @Override public void apply(IgniteInternalFuture<?> f) {
+                        long end = U.currentTimeMillis();
+
+                        if (log.isDebugEnabled())
+                            log.debug("Finished flushing shuffle messages [taskInfo=" + taskInfo +
+                                ", flushTime=" + (end - start) + ']');
+
+                        try {
+                            // Check for errors on shuffle.
+                            f.get();
+
+                            notifyTaskFinished(taskInfo, status, false);
+                        }
+                        catch (IgniteCheckedException e) {
+                            log.error("Failed to flush shuffle messages (will fail the task) [taskInfo=" + taskInfo +
+                                ", state=" + state + ", err=" + err + ']', e);
+
+                            notifyTaskFinished(taskInfo,
+                                new HadoopTaskStatus(HadoopTaskState.FAILED, e), false);
+                        }
+                    }
+                });
+            }
+            catch (IgniteCheckedException e) {
+                log.error("Failed to flush shuffle messages (will fail the task) [taskInfo=" + taskInfo +
+                    ", state=" + state + ", err=" + err + ']', e);
+
+                notifyTaskFinished(taskInfo, new HadoopTaskStatus(HadoopTaskState.FAILED, e), false);
+            }
+        }
+    }
+
+    /**
+     * Checks if message was received from parent node and prints warning if not.
+     *
+     * @param desc Sender process descriptor.
+     * @param msg Received message.
+     * @return {@code True} if received from parent node.
+     */
+    private boolean validateNodeMessage(HadoopProcessDescriptor desc, HadoopMessage msg) {
+        if (!nodeDesc.processId().equals(desc.processId())) {
+            log.warning("Received process control request from unknown process (will ignore) [desc=" + desc +
+                ", msg=" + msg + ']');
+
+            return false;
+        }
+
+        return true;
+    }
+
+    /**
+     * Stops execution of this process.
+     */
+    private void terminate() {
+        System.exit(1);
+    }
+
+    /**
+     * Message listener.
+     */
+    private class MessageListener implements HadoopMessageListener {
+        /** {@inheritDoc} */
+        @Override public void onMessageReceived(final HadoopProcessDescriptor desc, final HadoopMessage msg) {
+            if (msg instanceof HadoopTaskExecutionRequest) {
+                if (validateNodeMessage(desc, msg))
+                    runTasks((HadoopTaskExecutionRequest)msg);
+            }
+            else if (msg instanceof HadoopJobInfoUpdateRequest) {
+                if (validateNodeMessage(desc, msg))
+                    updateTasks((HadoopJobInfoUpdateRequest)msg);
+            }
+            else if (msg instanceof HadoopPrepareForJobRequest) {
+                if (validateNodeMessage(desc, msg))
+                    prepareProcess((HadoopPrepareForJobRequest)msg);
+            }
+            else if (msg instanceof HadoopShuffleMessage) {
+                if (log.isTraceEnabled())
+                    log.trace("Received shuffle message [desc=" + desc + ", msg=" + msg + ']');
+
+                initFut.listen(new CI1<IgniteInternalFuture<?>>() {
+                    @Override public void apply(IgniteInternalFuture<?> f) {
+                        try {
+                            HadoopShuffleMessage m = (HadoopShuffleMessage)msg;
+
+                            shuffleJob.onShuffleMessage(m);
+
+                            comm.sendMessage(desc, new HadoopShuffleAck(m.id(), m.jobId()));
+                        }
+                        catch (IgniteCheckedException e) {
+                            U.error(log, "Failed to process hadoop shuffle message [desc=" + desc + ", msg=" + msg + ']', e);
+                        }
+                    }
+                });
+            }
+            else if (msg instanceof HadoopShuffleAck) {
+                if (log.isTraceEnabled())
+                    log.trace("Received shuffle ack [desc=" + desc + ", msg=" + msg + ']');
+
+                shuffleJob.onShuffleAck((HadoopShuffleAck)msg);
+            }
+            else
+                log.warning("Unknown message received (will ignore) [desc=" + desc + ", msg=" + msg + ']');
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onConnectionLost(HadoopProcessDescriptor desc) {
+            if (log.isDebugEnabled())
+                log.debug("Lost connection with remote process: " + desc);
+
+            if (desc == null)
+                U.warn(log, "Handshake failed.");
+            else if (desc.processId().equals(nodeDesc.processId())) {
+                log.warning("Child process lost connection with parent node (will terminate child process).");
+
+                shutdown();
+
+                terminate();
+            }
+        }
+    }
+}
\ No newline at end of file
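For readers tracing the control flow in HadoopChildProcessRunner: prepareProcess(), runTasks() and updateTasks() coordinate through the initGuard/initFut pair, so initialization runs exactly once while later requests queue behind the future. A minimal sketch of that pattern using only JDK types (CompletableFuture stands in for GridFutureAdapter; all names below are illustrative and not part of the module):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.atomic.AtomicBoolean;

    class OneShotInit {
        /** Guarantees the init block runs at most once. */
        private final AtomicBoolean initGuard = new AtomicBoolean();

        /** Completed (or failed) exactly once; later requests chain onto it. */
        private final CompletableFuture<Void> initFut = new CompletableFuture<>();

        /** Mirrors prepareProcess(): the first caller initializes, duplicates are ignored. */
        void prepare(Runnable init) {
            if (initGuard.compareAndSet(false, true)) {
                try {
                    init.run();

                    initFut.complete(null);           // Releases queued requests.
                }
                catch (RuntimeException e) {
                    initFut.completeExceptionally(e); // Queued requests observe the failure.
                }
            }
            // Else: duplicate request, ignored (the runner logs a warning instead).
        }

        /** Mirrors runTasks()/updateTasks(): runs once initialization completes successfully. */
        void whenReady(Runnable task) {
            initFut.thenRun(task);                    // Runs immediately if init is already done.
        }
    }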


http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestTaskContext.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestTaskContext.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestTaskContext.java
new file mode 100644
index 0000000..f542cf2
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestTaskContext.java
@@ -0,0 +1,228 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2TaskContext;
+
+/**
+ * Context for test purpose.
+ */
+class HadoopTestTaskContext extends HadoopV2TaskContext {
+    /**
+     * Simple key-value pair.
+     * @param <K> Key class.
+     * @param <V> Value class.
+     */
+    public static class Pair<K,V> {
+        /** Key. */
+        private K key;
+
+        /** Value. */
+        private V val;
+
+        /**
+         * @param key Key.
+         * @param val Value.
+         */
+        Pair(K key, V val) {
+            this.key = key;
+            this.val = val;
+        }
+
+        /**
+         * Getter of key.
+         * @return Key.
+         */
+        K key() {
+            return key;
+        }
+
+        /**
+         * Getter of value.
+         * @return Value.
+         */
+        V value() {
+            return val;
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return key + "," + val;
+        }
+    }
+
+    /** Mock output container: result data of task execution unless overridden. */
+    private List<Pair<String, Integer>> mockOutput = new ArrayList<>();
+
+    /** Mock input container: input data unless overridden. */
+    private Map<Object,List> mockInput = new TreeMap<>();
+
+    /** Context output implementation to write data into mockOutput. */
+    private HadoopTaskOutput output = new HadoopTaskOutput() {
+        /** {@inheritDoc} */
+        @Override public void write(Object key, Object val) {
+            // Cast to the expected Writable types and copy the values out.
+            String strKey = new String(((Text)key).getBytes());
+            int intVal = ((IntWritable)val).get();
+
+            mockOutput().add(new Pair<>(strKey, intVal));
+        }
+
+        /** {@inheritDoc} */
+        @Override public void close() {
+            throw new UnsupportedOperationException();
+        }
+    };
+
+    /** Context input implementation to read data from mockInput. */
+    private HadoopTaskInput input = new HadoopTaskInput() {
+        /** Iterator of keys and associated lists of values. */
+        Iterator<Map.Entry<Object, List>> iter;
+
+        /** Current key and associated value list. */
+        Map.Entry<Object, List> currEntry;
+
+        /** {@inheritDoc} */
+        @Override public boolean next() {
+            if (iter == null)
+                iter = mockInput().entrySet().iterator();
+
+            if (iter.hasNext())
+                currEntry = iter.next();
+            else
+                currEntry = null;
+
+            return currEntry != null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Object key() {
+            return currEntry.getKey();
+        }
+
+        /** {@inheritDoc} */
+        @Override public Iterator<?> values() {
+            return currEntry.getValue().iterator();
+        }
+
+        /** {@inheritDoc} */
+        @Override public void close() {
+            throw new UnsupportedOperationException();
+        }
+    };
+
+    /**
+     * Getter of mock output container: result of the task unless overridden.
+     *
+     * @return Mock output.
+     */
+    public List<Pair<String, Integer>> mockOutput() {
+        return mockOutput;
+    }
+
+    /**
+     * Getter of mock input container: input data unless overridden.
+     *
+     * @return Mock input.
+     */
+    public Map<Object, List> mockInput() {
+        return mockInput;
+    }
+
+    /**
+     * Generates a one-key-multiple-values tree from a list of key-value pairs and wraps it into Writable objects.
+     * The result is placed into the mock input.
+     *
+     * @param flatData List of key-value pairs.
+     */
+    public void makeTreeOfWritables(Iterable<Pair<String, Integer>> flatData) {
+        Text key = new Text();
+
+        for (HadoopTestTaskContext.Pair<String, Integer> pair : flatData) {
+            key.set(pair.key);
+            ArrayList<IntWritable> valList;
+
+            if (!mockInput.containsKey(key)) {
+                valList = new ArrayList<>();
+                mockInput.put(key, valList);
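+                // The Text instance is now referenced by the map as a key and must not
+                // be mutated, hence a fresh instance is allocated for subsequent pairs.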
+                key = new Text();
+            }
+            else
+                valList = (ArrayList<IntWritable>)mockInput.get(key);
+
+            valList.add(new IntWritable(pair.value()));
+        }
+    }
+
+    /**
+     * @param taskInfo Task info.
+     * @param gridJob Grid Hadoop job.
+     * @throws IgniteCheckedException If failed.
+     */
+    public HadoopTestTaskContext(HadoopTaskInfo taskInfo, HadoopJob gridJob) throws IgniteCheckedException {
+        super(taskInfo, gridJob, gridJob.id(), null, jobConfDataInput(gridJob));
+    }
+
+    /**
+     * Creates DataInput to read JobConf.
+     *
+     * @param job Job.
+     * @return DataInput with JobConf.
+     * @throws IgniteCheckedException If failed.
+     */
+    private static DataInput jobConfDataInput(HadoopJob job) throws IgniteCheckedException {
+        JobConf jobConf = new JobConf();
+
+        for (Map.Entry<String, String> e : ((HadoopDefaultJobInfo)job.info()).properties().entrySet())
+            jobConf.set(e.getKey(), e.getValue());
+
+        ByteArrayOutputStream buf = new ByteArrayOutputStream();
+
+        try {
+            jobConf.write(new DataOutputStream(buf));
+        }
+        catch (IOException e) {
+            throw new IgniteCheckedException(e);
+        }
+
+        return new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopTaskOutput output() {
+        return output;
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopTaskInput input() {
+        return input;
+    }
+}
\ No newline at end of file
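makeTreeOfWritables() above is the usual "group values by key into a sorted map" idiom, complicated only by Writable instance reuse. A plain-Java sketch of the same grouping without that subtlety (names are illustrative):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.SortedMap;
    import java.util.TreeMap;

    class GroupByKey {
        /** Groups (key, value) pairs into a sorted one-key-to-many-values map. */
        static SortedMap<String, List<Integer>> group(Iterable<Map.Entry<String, Integer>> pairs) {
            SortedMap<String, List<Integer>> res = new TreeMap<>();

            for (Map.Entry<String, Integer> pair : pairs)
                res.computeIfAbsent(pair.getKey(), k -> new ArrayList<>()).add(pair.getValue());

            return res;
        }
    }

With reusable Writables the only extra rule is the one noted in the code above: once a Text has been stored as a map key, a fresh instance must be allocated before the next set().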

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestUtils.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestUtils.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestUtils.java
new file mode 100644
index 0000000..da0d922
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestUtils.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.jetbrains.annotations.Nullable;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Utility class for tests.
+ */
+public class HadoopTestUtils {
+    /** Base test directory. */
+    private static final File BASE_TEST_DIR = new File(U.getIgniteHome() + "/work/test/hadoop/");
+
+    /**
+     * @return Base directory for tests.
+     */
+    public static File baseTestDir() {
+        return BASE_TEST_DIR;
+    }
+
+    /**
+     * Get test directory.
+     *
+     * @param parts Parts.
+     * @return Directory.
+     */
+    public static File testDir(String... parts) {
+        File res = BASE_TEST_DIR;
+
+        if (parts != null) {
+            for (String part : parts)
+                res = new File(res, part);
+        }
+
+        return res;
+    }
+
+    /**
+     * Clear base test directory.
+     */
+    public static void clearBaseTestDir() {
+        if (baseTestDir().exists())
+            assert delete(baseTestDir());
+    }
+
+    /**
+     * Checks that job statistics file contains valid strings only.
+     *
+     * @param reader Buffered reader to get lines of job statistics.
+     * @return Number of events.
+     * @throws IOException If failed.
+     */
+    @SuppressWarnings("ResultOfMethodCallIgnored")
+    public static long simpleCheckJobStatFile(BufferedReader reader) throws IOException {
+        Collection<String> phases = new HashSet<>();
+
+        phases.add("submit");
+        phases.add("prepare");
+        phases.add("start");
+        phases.add("finish");
+        phases.add("requestId");
+        phases.add("responseId");
+
+        Collection<String> evtTypes = new HashSet<>();
+
+        evtTypes.add("JOB");
+        evtTypes.add("SETUP");
+        evtTypes.add("MAP");
+        evtTypes.add("SHUFFLE");
+        evtTypes.add("REDUCE");
+        evtTypes.add("COMBINE");
+        evtTypes.add("COMMIT");
+
+        long evtCnt = 0;
+        String line;
+
+        Map<Long, String> reduceNodes = new HashMap<>();
+
+        while ((line = reader.readLine()) != null) {
+            String[] splitLine = line.split(":");
+
+            // Try to parse the timestamp.
+            Long.parseLong(splitLine[1]);
+
+            String[] evt = splitLine[0].split(" ");
+
+            assertTrue("Unknown event '" + evt[0] + "'", evtTypes.contains(evt[0]));
+
+            String phase;
+
+            if ("JOB".equals(evt[0]))
+                phase = evt[1];
+            else {
+                assertEquals(4, evt.length);
+                assertTrue("The node id is not defined", !F.isEmpty(evt[3]));
+
+                long taskNum = Long.parseLong(evt[1]);
+
+                if (("REDUCE".equals(evt[0]) || "SHUFFLE".equals(evt[0]))) {
+                    String nodeId = reduceNodes.get(taskNum);
+
+                    if (nodeId == null)
+                        reduceNodes.put(taskNum, evt[3]);
+                    else
+                        assertEquals("Different nodes for SHUFFLE and REDUCE tasks", nodeId, evt[3]);
+                }
+
+                phase = evt[2];
+            }
+
+            assertTrue("Unknown phase '" + phase + "' in " + Arrays.toString(evt), phases.contains(phase));
+
+            evtCnt++;
+        }
+
+        return evtCnt;
+    }
+
+    /**
+     * Deletes file or directory with all sub-directories and files.
+     *
+     * @param file File or directory to delete.
+     * @return {@code True} if and only if the file or directory is successfully deleted,
+     *      {@code false} otherwise.
+     */
+    public static boolean delete(@Nullable File file) {
+        if (file == null)
+            return false;
+
+        boolean res = true;
+
+        if (file.isDirectory()) {
+            File[] files = file.listFiles();
+
+            if (files != null && files.length > 0)
+                for (File file1 : files)
+                    if (file1.isDirectory())
+                        res &= delete(file1);
+                    else
+                        res &= file1.delete();
+
+            res &= file.delete();
+        }
+        else
+            res = file.delete();
+
+        return res;
+    }
+}
\ No newline at end of file
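simpleCheckJobStatFile() above implies a statistics line format of either "JOB <phase>:<timestamp>" or "<TASK_TYPE> <taskNum> <phase> <nodeId>:<timestamp>". A stand-alone sketch of parsing one such line (the format is inferred from the parser, not from a published spec, and the sample values are made up):

    class StatLineParser {
        /** Parses lines like "JOB submit:1474280811000" or "MAP 0 start node-1:1474280811000". */
        static void parse(String line) {
            String[] parts = line.split(":");

            long ts = Long.parseLong(parts[1]);   // Timestamp follows the colon.

            String[] evt = parts[0].split(" ");

            if ("JOB".equals(evt[0]))
                System.out.println("job phase=" + evt[1] + " ts=" + ts);
            else
                // Task event: type, task number, phase, node id.
                System.out.println(evt[0] + " task=" + Long.parseLong(evt[1]) +
                    " phase=" + evt[2] + " node=" + evt[3] + " ts=" + ts);
        }
    }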

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopUserLibsSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopUserLibsSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopUserLibsSelfTest.java
new file mode 100644
index 0000000..9e3c8f4
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopUserLibsSelfTest.java
@@ -0,0 +1,260 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+
+/**
+ * Tests for user libs parsing.
+ */
+public class HadoopUserLibsSelfTest extends GridCommonAbstractTest {
+    /** Directory 1. */
+    private static final File DIR_1 = HadoopTestUtils.testDir("dir1");
+
+    /** File 1 in directory 1. */
+    private static final File FILE_1_1 = new File(DIR_1, "file1.jar");
+
+    /** File 2 in directory 1. */
+    private static final File FILE_1_2 = new File(DIR_1, "file2.jar");
+
+    /** Directory 2. */
+    private static final File DIR_2 = HadoopTestUtils.testDir("dir2");
+
+    /** File 1 in directory 2. */
+    private static final File FILE_2_1 = new File(DIR_2, "file1.jar");
+
+    /** File 2 in directory 2. */
+    private static final File FILE_2_2 = new File(DIR_2, "file2.jar");
+
+    /** Missing directory. */
+    private static final File MISSING_DIR = HadoopTestUtils.testDir("missing_dir");
+
+    /** Missing file. */
+    private static final File MISSING_FILE = new File(MISSING_DIR, "file.jar");
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        HadoopTestUtils.clearBaseTestDir();
+
+        assert DIR_1.mkdirs();
+        assert DIR_2.mkdirs();
+
+        assert FILE_1_1.createNewFile();
+        assert FILE_1_2.createNewFile();
+        assert FILE_2_1.createNewFile();
+        assert FILE_2_2.createNewFile();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        // Sanity checks before test start.
+        ensureExists(FILE_1_1);
+        ensureExists(FILE_1_2);
+        ensureExists(FILE_2_1);
+        ensureExists(FILE_2_2);
+
+        ensureNotExists(MISSING_DIR);
+        ensureNotExists(MISSING_FILE);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        HadoopTestUtils.clearBaseTestDir();
+    }
+
+    /**
+     * Test null or empty user libs.
+     *
+     * @throws Exception If failed.
+     */
+    public void testNullOrEmptyUserLibs() throws Exception {
+        assert parse(null).isEmpty();
+        assert parse("").isEmpty();
+    }
+
+    /**
+     * Test single file.
+     *
+     * @throws Exception If failed.
+     */
+    public void testSingle() throws Exception {
+        Collection<File> res = parse(single(FILE_1_1));
+
+        assert res.size() == 1;
+        assert res.contains(FILE_1_1);
+
+        res = parse(single(MISSING_FILE));
+
+        assert res.size() == 0;
+    }
+
+    /**
+     * Test multiple files.
+     *
+     * @throws Exception If failed.
+     */
+    public void testMultiple() throws Exception {
+        Collection<File> res =
+            parse(merge(single(FILE_1_1), single(FILE_1_2), single(FILE_2_1), single(FILE_2_2), single(MISSING_FILE)));
+
+        assert res.size() == 4;
+        assert res.contains(FILE_1_1);
+        assert res.contains(FILE_1_2);
+        assert res.contains(FILE_2_1);
+        assert res.contains(FILE_2_2);
+    }
+
+    /**
+     * Test single wildcard.
+     *
+     * @throws Exception If failed.
+     */
+    public void testSingleWildcard() throws Exception {
+        Collection<File> res = parse(wildcard(DIR_1));
+
+        assert res.size() == 2;
+        assert res.contains(FILE_1_1);
+        assert res.contains(FILE_1_2);
+
+        res = parse(wildcard(MISSING_DIR));
+
+        assert res.size() == 0;
+    }
+
+    /**
+     * Test multiple wildcards.
+     *
+     * @throws Exception If failed.
+     */
+    public void testMultipleWildcards() throws Exception {
+        Collection<File> res = parse(merge(wildcard(DIR_1), wildcard(DIR_2), wildcard(MISSING_DIR)));
+
+        assert res.size() == 4;
+        assert res.contains(FILE_1_1);
+        assert res.contains(FILE_1_2);
+        assert res.contains(FILE_2_1);
+        assert res.contains(FILE_2_2);
+    }
+
+    /**
+     * Test mixed tokens.
+     *
+     * @throws Exception If failed.
+     */
+    public void testMixed() throws Exception {
+        String str = merge(
+            single(FILE_1_1),
+            wildcard(DIR_2),
+            single(MISSING_FILE),
+            wildcard(MISSING_DIR)
+        );
+
+        Collection<File> res = parse(str);
+
+        assert res.size() == 3;
+        assert res.contains(FILE_1_1);
+        assert res.contains(FILE_2_1);
+        assert res.contains(FILE_2_2);
+    }
+
+    /**
+     * Ensure provided file exists.
+     *
+     * @param file File.
+     */
+    private static void ensureExists(File file) {
+        assert file.exists();
+    }
+
+    /**
+     * Ensure provided file doesn't exist.
+     *
+     * @param file File.
+     */
+    private static void ensureNotExists(File file) {
+        assert !file.exists();
+    }
+
+    /**
+     * Merge string using path separator.
+     *
+     * @param vals Values.
+     * @return Result.
+     */
+    private static String merge(String... vals) {
+        StringBuilder res = new StringBuilder();
+
+        if (vals != null) {
+            boolean first = true;
+
+            for (String val : vals) {
+                if (first)
+                    first = false;
+                else
+                    res.append(File.pathSeparatorChar);
+
+                res.append(val);
+            }
+        }
+
+        return res.toString();
+    }
+
+    /**
+     * Parse string.
+     *
+     * @param str String.
+     * @return Files.
+     * @throws IOException If failed.
+     */
+    Collection<File> parse(String str) throws IOException {
+        Collection<HadoopClasspathUtils.SearchDirectory> dirs = HadoopClasspathUtils.parseUserLibs(str);
+
+        Collection<File> res = new HashSet<>();
+
+        for (HadoopClasspathUtils.SearchDirectory dir : dirs)
+            Collections.addAll(res, dir.files());
+
+        return res;
+    }
+
+    /**
+     * Get absolute path to a single file.
+     *
+     * @param file File.
+     * @return Path.
+     */
+    private static String single(File file) {
+        return file.getAbsolutePath();
+    }
+
+    /**
+     * Create a wildcard.
+     *
+     * @param file File.
+     * @return Wildcard.
+     */
+    private static String wildcard(File file) {
+        return file.getAbsolutePath() + File.separatorChar + "*";
+    }
+}
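The single() and wildcard() helpers above build a File.pathSeparator-joined list in which a trailing "*" token means "every file in the directory". A simplified stand-alone sketch of such expansion, assuming missing files and directories are silently skipped as the tests require (the real logic lives in HadoopClasspathUtils.parseUserLibs() and is not reproduced exactly here):

    import java.io.File;
    import java.util.Collection;
    import java.util.Collections;
    import java.util.HashSet;

    class UserLibsSketch {
        /** Expands a "lib1.jar:dir/*" style string into existing files. */
        static Collection<File> expand(String libs) {
            Collection<File> res = new HashSet<>();

            if (libs == null || libs.isEmpty())
                return res;

            for (String token : libs.split(File.pathSeparator)) {
                if (token.endsWith(File.separator + "*")) {
                    // Wildcard: take all files of the directory, if it exists.
                    File[] files = new File(token.substring(0, token.length() - 2)).listFiles();

                    if (files != null)
                        Collections.addAll(res, files);
                }
                else {
                    File f = new File(token);

                    if (f.exists())
                        res.add(f);
                }
            }

            return res;
        }
    }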

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopV2JobSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopV2JobSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopV2JobSelfTest.java
new file mode 100644
index 0000000..ae2c00d
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopV2JobSelfTest.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.InputStream;
+import java.util.UUID;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.serializer.Deserializer;
+import org.apache.hadoop.io.serializer.WritableSerialization;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.v2.HadoopSerializationWrapper;
+import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2Job;
+
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.createJobInfo;
+
+/**
+ * Self test of {@link org.apache.ignite.internal.processors.hadoop.v2.HadoopV2Job}.
+ */
+public class HadoopV2JobSelfTest extends HadoopAbstractSelfTest {
+    /** */
+    private static final String TEST_SERIALIZED_VALUE = "Test serialized value";
+
+    /**
+     * Custom serialization class that accepts {@link Writable}.
+     */
+    private static class CustomSerialization extends WritableSerialization {
+        /** {@inheritDoc} */
+        @Override public Deserializer<Writable> getDeserializer(Class<Writable> c) {
+            return new Deserializer<Writable>() {
+                @Override public void open(InputStream in) { }
+
+                @Override public Writable deserialize(Writable writable) {
+                    return new Text(TEST_SERIALIZED_VALUE);
+                }
+
+                @Override public void close() { }
+            };
+        }
+    }
+
+    /**
+     * Tests that {@link HadoopJob} provides wrapped serializer if it's set in configuration.
+     *
+     * @throws IgniteCheckedException If failed.
+     */
+    public void testCustomSerializationApplying() throws IgniteCheckedException {
+        JobConf cfg = new JobConf();
+
+        cfg.setMapOutputKeyClass(IntWritable.class);
+        cfg.setMapOutputValueClass(Text.class);
+        cfg.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, CustomSerialization.class.getName());
+
+        HadoopDefaultJobInfo info = createJobInfo(cfg);
+
+        final UUID uuid = UUID.randomUUID();
+
+        HadoopJobId id = new HadoopJobId(uuid, 1);
+
+        HadoopJob job = info.createJob(HadoopV2Job.class, id, log, null);
+
+        HadoopTaskContext taskCtx = job.getTaskContext(new HadoopTaskInfo(HadoopTaskType.MAP, null, 0, 0,
+            null));
+
+        HadoopSerialization ser = taskCtx.keySerialization();
+
+        assertEquals(HadoopSerializationWrapper.class.getName(), ser.getClass().getName());
+
+        DataInput in = new DataInputStream(new ByteArrayInputStream(new byte[0]));
+
+        assertEquals(TEST_SERIALIZED_VALUE, ser.read(in, null).toString());
+
+        ser = taskCtx.valueSerialization();
+
+        assertEquals(HadoopSerializationWrapper.class.getName(), ser.getClass().getName());
+
+        assertEquals(TEST_SERIALIZED_VALUE, ser.read(in, null).toString());
+    }
+}
\ No newline at end of file
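For context on the mechanism under test: Hadoop resolves the classes listed under io.serializations through its SerializationFactory, which returns the first serialization whose accept() matches the requested class. A small sketch of that lookup using plain Hadoop APIs ("my.pkg.CustomSerialization" is a placeholder for a class like the one above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.serializer.Deserializer;
    import org.apache.hadoop.io.serializer.SerializationFactory;

    class SerializationLookup {
        static Deserializer<Text> lookup() {
            Configuration conf = new Configuration();

            // Register the custom serialization, as the test above does via JobConf.
            conf.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, "my.pkg.CustomSerialization");

            // The factory walks the configured serializations and returns the
            // first one that accepts the requested class.
            return new SerializationFactory(conf).getDeserializer(Text.class);
        }
    }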

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopValidationSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopValidationSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopValidationSelfTest.java
new file mode 100644
index 0000000..1496150
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopValidationSelfTest.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import org.apache.ignite.configuration.IgniteConfiguration;
+
+/**
+ * Configuration validation tests.
+ */
+public class HadoopValidationSelfTest extends HadoopAbstractSelfTest {
+    /** Peer class loading enabled flag. */
+    public boolean peerClassLoading;
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids(true);
+
+        peerClassLoading = false;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        cfg.setPeerClassLoadingEnabled(peerClassLoading);
+
+        return cfg;
+    }
+
+    /**
+     * Ensure that Grid starts when all configuration parameters are valid.
+     *
+     * @throws Exception If failed.
+     */
+    public void testValid() throws Exception {
+        startGrids(1);
+    }
+}
\ No newline at end of file
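The validation test above boils down to starting a node with the assembled IgniteConfiguration; a minimal stand-alone equivalent (the grid name is illustrative):

    import org.apache.ignite.Ignite;
    import org.apache.ignite.Ignition;
    import org.apache.ignite.configuration.IgniteConfiguration;

    class StartValidNode {
        public static void main(String[] args) {
            IgniteConfiguration cfg = new IgniteConfiguration();

            cfg.setGridName("validation-test");    // Illustrative name.
            cfg.setPeerClassLoadingEnabled(false); // The flag the test toggles.

            // If configuration validation fails, Ignition.start() throws.
            try (Ignite ignored = Ignition.start(cfg)) {
                // Node started successfully: the configuration is valid.
            }
        }
    }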

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopWeightedMapReducePlannerTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopWeightedMapReducePlannerTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopWeightedMapReducePlannerTest.java
new file mode 100644
index 0000000..4e7cc50
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopWeightedMapReducePlannerTest.java
@@ -0,0 +1,599 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import org.apache.ignite.cluster.ClusterMetrics;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.hadoop.mapreduce.IgniteHadoopWeightedMapReducePlanner;
+import org.apache.ignite.igfs.IgfsBlockLocation;
+import org.apache.ignite.igfs.IgfsPath;
+import org.apache.ignite.internal.IgniteNodeAttributes;
+import org.apache.ignite.internal.processors.hadoop.planner.HadoopAbstractMapReducePlanner;
+import org.apache.ignite.internal.processors.igfs.IgfsIgniteMock;
+import org.apache.ignite.internal.processors.igfs.IgfsMock;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.lang.IgniteProductVersion;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.jetbrains.annotations.Nullable;
+
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.UUID;
+
+/**
+ * Tests for weighted map-reduce planner.
+ */
+public class HadoopWeightedMapReducePlannerTest extends GridCommonAbstractTest {
+    /** ID 1. */
+    private static final UUID ID_1 = new UUID(0, 1);
+
+    /** ID 2. */
+    private static final UUID ID_2 = new UUID(0, 2);
+
+    /** ID 3. */
+    private static final UUID ID_3 = new UUID(0, 3);
+
+    /** MAC 1. */
+    private static final String MAC_1 = "mac1";
+
+    /** MAC 2. */
+    private static final String MAC_2 = "mac2";
+
+    /** MAC 3. */
+    private static final String MAC_3 = "mac3";
+
+    /** Host 1. */
+    private static final String HOST_1 = "host1";
+
+    /** Host 2. */
+    private static final String HOST_2 = "host2";
+
+    /** Host 3. */
+    private static final String HOST_3 = "host3";
+
+    /** Host 4. */
+    private static final String HOST_4 = "host4";
+
+    /** Host 5. */
+    private static final String HOST_5 = "host5";
+
+    /** Standard node 1. */
+    private static final MockNode NODE_1 = new MockNode(ID_1, MAC_1, HOST_1);
+
+    /** Standard node 2. */
+    private static final MockNode NODE_2 = new MockNode(ID_2, MAC_2, HOST_2);
+
+    /** Standard node 3. */
+    private static final MockNode NODE_3 = new MockNode(ID_3, MAC_3, HOST_3);
+
+    /** Standard nodes. */
+    private static final Collection<ClusterNode> NODES;
+
+    /**
+     * Static initializer.
+     */
+    static {
+        NODES = new ArrayList<>();
+
+        NODES.add(NODE_1);
+        NODES.add(NODE_2);
+        NODES.add(NODE_3);
+    }
+
+    /**
+     * Test one IGFS split being assigned to an affinity node.
+     *
+     * @throws Exception If failed.
+     */
+    public void testOneIgfsSplitAffinity() throws Exception {
+        IgfsMock igfs = LocationsBuilder.create().add(0, NODE_1).add(50, NODE_2).add(100, NODE_3).buildIgfs();
+
+        List<HadoopInputSplit> splits = new ArrayList<>();
+
+        splits.add(new HadoopFileBlock(new String[] { HOST_1 }, URI.create("igfs://igfs@/file"), 0, 50));
+
+        final int expReducers = 4;
+
+        HadoopPlannerMockJob job = new HadoopPlannerMockJob(splits, expReducers);
+
+        IgniteHadoopWeightedMapReducePlanner planner = createPlanner(igfs);
+
+        HadoopMapReducePlan plan = planner.preparePlan(job, NODES, null);
+
+        assert plan.mappers() == 1;
+        assert plan.mapperNodeIds().size() == 1;
+        assert plan.mapperNodeIds().contains(ID_1);
+
+        checkPlanMappers(plan, splits, NODES, false/*only 1 split*/);
+        checkPlanReducers(plan, NODES, expReducers, false/* because of threshold behavior.*/);
+    }
+
+    /**
+     * Test HDFS splits.
+     *
+     * @throws Exception If failed.
+     */
+    public void testHdfsSplitsAffinity() throws Exception {
+        IgfsMock igfs = LocationsBuilder.create().add(0, NODE_1).add(50, NODE_2).add(100, NODE_3).buildIgfs();
+
+        final List<HadoopInputSplit> splits = new ArrayList<>();
+
+        splits.add(new HadoopFileBlock(new String[] { HOST_1 }, URI.create("hdfs://" + HOST_1 + "/x"), 0, 50));
+        splits.add(new HadoopFileBlock(new String[] { HOST_2 }, URI.create("hdfs://" + HOST_2 + "/x"), 50, 100));
+        splits.add(new HadoopFileBlock(new String[] { HOST_3 }, URI.create("hdfs://" + HOST_3 + "/x"), 100, 37));
+
+        // The following splits belong to hosts that are outside of the Ignite topology.
+        // This means that these splits should be assigned to the least loaded nodes:
+        splits.add(new HadoopFileBlock(new String[] { HOST_4 }, URI.create("hdfs://" + HOST_4 + "/x"), 138, 2));
+        splits.add(new HadoopFileBlock(new String[] { HOST_5 }, URI.create("hdfs://" + HOST_5 + "/x"), 140, 3));
+
+        final int expReducers = 7;
+
+        HadoopPlannerMockJob job = new HadoopPlannerMockJob(splits, expReducers);
+
+        IgniteHadoopWeightedMapReducePlanner planner = createPlanner(igfs);
+
+        final HadoopMapReducePlan plan = planner.preparePlan(job, NODES, null);
+
+        checkPlanMappers(plan, splits, NODES, true);
+
+        checkPlanReducers(plan, NODES, expReducers, true);
+    }
+
+    /**
+     * Test HDFS splits with replication factor 3.
+     *
+     * @throws Exception If failed.
+     */
+    public void testHdfsSplitsReplication() throws Exception {
+        IgfsMock igfs = LocationsBuilder.create().add(0, NODE_1).add(50, NODE_2).add(100, NODE_3).buildIgfs();
+
+        final List<HadoopInputSplit> splits = new ArrayList<>();
+
+        splits.add(new HadoopFileBlock(new String[] { HOST_1, HOST_2, HOST_3 }, URI.create("hdfs://" + HOST_1 + "/x"), 0, 50));
+        splits.add(new HadoopFileBlock(new String[] { HOST_2, HOST_3, HOST_4 }, URI.create("hdfs://" + HOST_2 + "/x"), 50, 100));
+        splits.add(new HadoopFileBlock(new String[] { HOST_3, HOST_4, HOST_5 }, URI.create("hdfs://" + HOST_3 + "/x"), 100, 37));
+
+        // The following splits belong to hosts that are outside of the Ignite topology.
+        // This means that these splits should be assigned to the least loaded nodes:
+        splits.add(new HadoopFileBlock(new String[] { HOST_4, HOST_5, HOST_1 }, URI.create("hdfs://" + HOST_4 + "/x"), 138, 2));
+        splits.add(new HadoopFileBlock(new String[] { HOST_5, HOST_1, HOST_2 }, URI.create("hdfs://" + HOST_5 + "/x"), 140, 3));
+
+        final int expReducers = 8;
+
+        HadoopPlannerMockJob job = new HadoopPlannerMockJob(splits, expReducers);
+
+        IgniteHadoopWeightedMapReducePlanner planner = createPlanner(igfs);
+
+        final HadoopMapReducePlan plan = planner.preparePlan(job, NODES, null);
+
+        checkPlanMappers(plan, splits, NODES, true);
+
+        checkPlanReducers(plan, NODES, expReducers, true);
+    }
+
+    /**
+     * Get all IDs.
+     *
+     * @param nodes Nodes.
+     * @return IDs.
+     */
+    private static Set<UUID> allIds(Collection<ClusterNode> nodes) {
+        Set<UUID> allIds = new HashSet<>();
+
+        for (ClusterNode n : nodes)
+            allIds.add(n.id());
+
+        return allIds;
+    }
+
+    /**
+     * Check mappers for the plan.
+     *
+     * @param plan Plan.
+     * @param splits Splits.
+     * @param nodes Nodes.
+     * @param expectUniformity Whether uniformity is expected.
+     */
+    private static void checkPlanMappers(HadoopMapReducePlan plan, List<HadoopInputSplit> splits,
+        Collection<ClusterNode> nodes, boolean expectUniformity) {
+        // Number of mappers should correspond to the number of input splits:
+        assertEquals(splits.size(), plan.mappers());
+
+        if (expectUniformity) {
+            // Mappers are assigned to all available nodes:
+            assertEquals(nodes.size(), plan.mapperNodeIds().size());
+
+            assertEquals(allIds(nodes), plan.mapperNodeIds());
+        }
+
+        // Check all splits are covered by mappers:
+        Set<HadoopInputSplit> set = new HashSet<>();
+
+        for (UUID id: plan.mapperNodeIds()) {
+            Collection<HadoopInputSplit> sp = plan.mappers(id);
+
+            assert sp != null;
+
+            for (HadoopInputSplit s: sp)
+                assertTrue(set.add(s));
+        }
+
+        // Must be of the same size and contain the same elements:
+        assertEquals(set, new HashSet<>(splits));
+    }
+
+    /**
+     * Check plan reducers.
+     *
+     * @param plan Plan.
+     * @param nodes Nodes.
+     * @param expReducers Expected reducers.
+     * @param expectUniformity Expected uniformity.
+     */
+    private static void checkPlanReducers(HadoopMapReducePlan plan,
+        Collection<ClusterNode> nodes, int expReducers, boolean expectUniformity) {
+
+        assertEquals(expReducers, plan.reducers());
+
+        if (expectUniformity)
+            assertEquals(allIds(nodes), plan.reducerNodeIds());
+
+        int sum = 0;
+        int lenSum = 0;
+
+        for (UUID uuid: plan.reducerNodeIds()) {
+            int[] rr = plan.reducers(uuid);
+
+            assert rr != null;
+
+            lenSum += rr.length;
+
+            for (int i: rr)
+                sum += i;
+        }
+
+        assertEquals(expReducers, lenSum);
+
+        // Numbers in the arrays must be consecutive integers starting from 0;
+        // check that simply by calculating their total sum: 0 + 1 + ... + (n - 1) = n * (n - 1) / 2.
+        assertEquals((lenSum * (lenSum - 1) / 2), sum);
+    }
+
+    /**
+     * Create planner for IGFS.
+     *
+     * @param igfs IGFS.
+     * @return Planner.
+     */
+    private static IgniteHadoopWeightedMapReducePlanner createPlanner(IgfsMock igfs) {
+        IgniteHadoopWeightedMapReducePlanner planner = new IgniteHadoopWeightedMapReducePlanner();
+
+        IgfsIgniteMock ignite = new IgfsIgniteMock(null, igfs);
+
+        GridTestUtils.setFieldValue(planner, HadoopAbstractMapReducePlanner.class, "ignite", ignite);
+
+        return planner;
+    }
+
+    /**
+     * Throw {@link UnsupportedOperationException}.
+     */
+    private static void throwUnsupported() {
+        throw new UnsupportedOperationException("Should not be called!");
+    }
+
+    /**
+     * Mocked node.
+     */
+    private static class MockNode implements ClusterNode {
+        /** ID. */
+        private final UUID id;
+
+        /** MAC addresses. */
+        private final String macs;
+
+        /** Addresses. */
+        private final List<String> addrs;
+
+        /**
+         * Constructor.
+         *
+         * @param id Node ID.
+         * @param macs MAC addresses.
+         * @param addrs Addresses.
+         */
+        public MockNode(UUID id, String macs, String... addrs) {
+            assert addrs != null;
+
+            this.id = id;
+            this.macs = macs;
+
+            this.addrs = Arrays.asList(addrs);
+        }
+
+        /** {@inheritDoc} */
+        @Override public UUID id() {
+            return id;
+        }
+
+        /** {@inheritDoc} */
+        @SuppressWarnings("unchecked")
+        @Nullable @Override public <T> T attribute(String name) {
+            if (F.eq(name, IgniteNodeAttributes.ATTR_MACS))
+                return (T)macs;
+
+            throwUnsupported();
+
+            return null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Collection<String> addresses() {
+            return addrs;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Object consistentId() {
+            throwUnsupported();
+
+            return null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public ClusterMetrics metrics() {
+            throwUnsupported();
+
+            return null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Map<String, Object> attributes() {
+            throwUnsupported();
+
+            return null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Collection<String> hostNames() {
+            throwUnsupported();
+
+            return null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public long order() {
+            throwUnsupported();
+
+            return 0;
+        }
+
+        /** {@inheritDoc} */
+        @Override public IgniteProductVersion version() {
+            throwUnsupported();
+
+            return null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean isLocal() {
+            throwUnsupported();
+
+            return false;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean isDaemon() {
+            throwUnsupported();
+
+            return false;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean isClient() {
+            throwUnsupported();
+
+            return false;
+        }
+    }
+
+    /**
+     * Locations builder.
+     */
+    private static class LocationsBuilder {
+        /** Locations. */
+        private final TreeMap<Long, Collection<MockNode>> locs = new TreeMap<>();
+
+        /**
+         * Create new locations builder.
+         *
+         * @return Locations builder.
+         */
+        public static LocationsBuilder create() {
+            return new LocationsBuilder();
+        }
+
+        /**
+         * Add locations.
+         *
+         * @param start Start.
+         * @param nodes Nodes.
+         * @return This builder for chaining.
+         */
+        public LocationsBuilder add(long start, MockNode... nodes) {
+            locs.put(start, Arrays.asList(nodes));
+
+            return this;
+        }
+
+        /**
+         * Build locations.
+         *
+         * @return Locations.
+         */
+        public TreeMap<Long, Collection<MockNode>> build() {
+            return locs;
+        }
+
+        /**
+         * Build IGFS.
+         *
+         * @return IGFS.
+         */
+        public MockIgfs buildIgfs() {
+            return new MockIgfs(build());
+        }
+    }
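+
+    // Usage of the builder above (illustrative):
+    //     LocationsBuilder.create().add(0, node1).add(50, node2).buildIgfs()
+    // yields a mock IGFS whose reported block locations switch from node1 to node2 at offset 50.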
+
+    /**
+     * Mocked IGFS.
+     */
+    private static class MockIgfs extends IgfsMock {
+        /** Block locations. */
+        private final TreeMap<Long, Collection<MockNode>> locs;
+
+        /**
+         * Constructor.
+         *
+         * @param locs Block locations.
+         */
+        public MockIgfs(TreeMap<Long, Collection<MockNode>> locs) {
+            super("igfs");
+
+            this.locs = locs;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Collection<IgfsBlockLocation> affinity(IgfsPath path, long start, long len) {
+            Collection<IgfsBlockLocation> res = new ArrayList<>();
+
+            long cur = start;
+            long remaining = len;
+
+            long prevLocStart = -1;
+            Collection<MockNode> prevLocNodes = null;
+
+            for (Map.Entry<Long, Collection<MockNode>> locEntry : locs.entrySet()) {
+                long locStart = locEntry.getKey();
+                Collection<MockNode> locNodes = locEntry.getValue();
+
+                if (prevLocNodes != null) {
+                    if (cur < locStart) {
+                        // Add part from previous block.
+                        long prevLen = locStart - prevLocStart;
+
+                        res.add(new IgfsBlockLocationMock(cur, prevLen, prevLocNodes));
+
+                        cur = locStart;
+                        remaining -= prevLen;
+                    }
+                }
+
+                prevLocStart = locStart;
+                prevLocNodes = locNodes;
+
+                if (remaining == 0)
+                    break;
+            }
+
+            // Add remainder.
+            if (remaining != 0)
+                res.add(new IgfsBlockLocationMock(cur, remaining, prevLocNodes));
+
+            return res;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean exists(IgfsPath path) {
+            return true;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean isProxy(URI path) {
+            return false;
+        }
+    }
+
+    /**
+     * Mocked block location.
+     */
+    private static class IgfsBlockLocationMock implements IgfsBlockLocation {
+        /** Start. */
+        private final long start;
+
+        /** Length. */
+        private final long len;
+
+        /** Node IDs. */
+        private final List<UUID> nodeIds;
+
+        /**
+         * Constructor.
+         *
+         * @param start Start.
+         * @param len Length.
+         * @param nodes Nodes.
+         */
+        public IgfsBlockLocationMock(long start, long len, Collection<MockNode> nodes) {
+            this.start = start;
+            this.len = len;
+
+            this.nodeIds = new ArrayList<>(nodes.size());
+
+            for (MockNode node : nodes)
+                nodeIds.add(node.id);
+        }
+
+        /** {@inheritDoc} */
+        @Override public long start() {
+            return start;
+        }
+
+        /** {@inheritDoc} */
+        @Override public long length() {
+            return len;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Collection<UUID> nodeIds() {
+            return nodeIds;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Collection<String> names() {
+            throwUnsupported();
+
+            return null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Collection<String> hosts() {
+            throwUnsupported();
+
+            return null;
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopWeightedPlannerMapReduceTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopWeightedPlannerMapReduceTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopWeightedPlannerMapReduceTest.java
new file mode 100644
index 0000000..e0403c2
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopWeightedPlannerMapReduceTest.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import org.apache.ignite.configuration.HadoopConfiguration;
+import org.apache.ignite.hadoop.mapreduce.IgniteHadoopWeightedMapReducePlanner;
+
+/**
+ * Tests whole map-reduce execution with the weighted planner.
+ */
+public class HadoopWeightedPlannerMapReduceTest extends HadoopMapReduceTest {
+    /** {@inheritDoc} */
+    @Override protected HadoopConfiguration createHadoopConfiguration() {
+        HadoopConfiguration hadoopCfg = new HadoopConfiguration();
+
+        // Use weighted planner with default settings:
+        IgniteHadoopWeightedMapReducePlanner planner = new IgniteHadoopWeightedMapReducePlanner();
+
+        hadoopCfg.setMapReducePlanner(planner);
+
+        return hadoopCfg;
+    }
+}
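
For reference, plugging this planner into a node follows the same pattern outside of tests. A minimal sketch, assuming the public IgniteConfiguration.setHadoopConfiguration(...) setter and the imports shown in the file above (this snippet is illustrative and not part of this commit):

    IgniteConfiguration cfg = new IgniteConfiguration();

    // Use the weighted planner with default settings, as in the test above.
    HadoopConfiguration hadoopCfg = new HadoopConfiguration();
    hadoopCfg.setMapReducePlanner(new IgniteHadoopWeightedMapReducePlanner());

    cfg.setHadoopConfiguration(hadoopCfg);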


[17/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIo.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIo.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIo.java
deleted file mode 100644
index 70f645f..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIo.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.igfs;
-
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.igfs.common.IgfsMessage;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * IO abstraction layer for IGFS client. Two kinds of messages are expected to be sent: requests with a response
- * and requests without a response.
- */
-public interface HadoopIgfsIo {
-    /**
-     * Sends the given IGFS client message and asynchronously awaits a response.
-     *
-     * @param msg Message to send.
-     * @return Future that will be completed with the response.
-     * @throws IgniteCheckedException If a message cannot be sent (connection is broken or client was closed).
-     */
-    public IgniteInternalFuture<IgfsMessage> send(IgfsMessage msg) throws IgniteCheckedException;
-
-    /**
-     * Sends the given IGFS client message and asynchronously awaits a response. When the IO layer detects the
-     * beginning of the response for the given message, it stops reading data and passes the input stream to a
-     * closure which can read the response in a specific way.
-     *
-     * @param msg Message to send.
-     * @param outBuf Output buffer. If {@code null}, the output buffer is not used.
-     * @param outOff Output buffer offset.
-     * @param outLen Output buffer length.
-     * @return Future that will be completed when response is returned from closure.
-     * @throws IgniteCheckedException If a message cannot be sent (connection is broken or client was closed).
-     */
-    public <T> IgniteInternalFuture<T> send(IgfsMessage msg, @Nullable byte[] outBuf, int outOff, int outLen)
-        throws IgniteCheckedException;
-
-    /**
-     * Sends given message and does not wait for response.
-     *
-     * @param msg Message to send.
-     * @throws IgniteCheckedException If send failed.
-     */
-    public void sendPlain(IgfsMessage msg) throws IgniteCheckedException;
-
-    /**
-     * Adds an event listener that will be invoked when the connection with the server is lost or a remote error
-     * occurs. If the connection is already closed, the callback is invoked synchronously inside this method.
-     *
-     * @param lsnr Event listener.
-     */
-    public void addEventListener(HadoopIgfsIpcIoListener lsnr);
-
-    /**
-     * Removes an event listener previously added via {@link #addEventListener(HadoopIgfsIpcIoListener)}.
-     *
-     * @param lsnr Event listener.
-     */
-    public void removeEventListener(HadoopIgfsIpcIoListener lsnr);
-}
\ No newline at end of file
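
For illustration, the two kinds of messages described by the interface above map onto two call patterns. A minimal sketch ('io', 'statusReq' and 'writeReq' are hypothetical placeholders, not part of this commit; 'io' would come from a concrete implementation such as the IPC-based one below):

    HadoopIgfsIo io = ...; // Obtained from a concrete implementation.

    // Request/response: send and asynchronously await the reply.
    IgniteInternalFuture<IgfsMessage> fut = io.send(statusReq);
    IgfsMessage res = fut.get(); // Blocks until the response arrives.

    // Fire-and-forget: no response is expected (used for stream writes).
    io.sendPlain(writeReq);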

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIo.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIo.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIo.java
deleted file mode 100644
index b0a4135..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIo.java
+++ /dev/null
@@ -1,624 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.igfs;
-
-import java.io.BufferedOutputStream;
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import org.apache.commons.logging.Log;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.igfs.IgfsException;
-import org.apache.ignite.internal.GridLoggerProxy;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.IgniteInterruptedCheckedException;
-import org.apache.ignite.internal.igfs.common.IgfsControlResponse;
-import org.apache.ignite.internal.igfs.common.IgfsDataInputStream;
-import org.apache.ignite.internal.igfs.common.IgfsDataOutputStream;
-import org.apache.ignite.internal.igfs.common.IgfsIpcCommand;
-import org.apache.ignite.internal.igfs.common.IgfsMarshaller;
-import org.apache.ignite.internal.igfs.common.IgfsMessage;
-import org.apache.ignite.internal.igfs.common.IgfsStreamControlRequest;
-import org.apache.ignite.internal.util.GridConcurrentHashSet;
-import org.apache.ignite.internal.util.GridStripedLock;
-import org.apache.ignite.internal.util.ipc.IpcEndpoint;
-import org.apache.ignite.internal.util.ipc.IpcEndpointFactory;
-import org.apache.ignite.internal.util.ipc.shmem.IpcOutOfSystemResourcesException;
-import org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.jetbrains.annotations.Nullable;
-import org.jsr166.ConcurrentHashMap8;
-
-/**
- * IO layer implementation based on blocking IPC streams.
- */
-@SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
-public class HadoopIgfsIpcIo implements HadoopIgfsIo {
-    /** Logger. */
-    private final Log log;
-
-    /** Request futures map. */
-    private ConcurrentMap<Long, HadoopIgfsFuture> reqMap =
-        new ConcurrentHashMap8<>();
-
-    /** Request ID counter. */
-    private AtomicLong reqIdCnt = new AtomicLong();
-
-    /** Endpoint. */
-    private IpcEndpoint endpoint;
-
-    /** Endpoint output stream. */
-    private IgfsDataOutputStream out;
-
-    /** Protocol. */
-    private final IgfsMarshaller marsh;
-
-    /** Client reader thread. */
-    private Thread reader;
-
-    /** Lock for graceful shutdown. */
-    private final ReadWriteLock busyLock = new ReentrantReadWriteLock();
-
-    /** Stopping flag. */
-    private volatile boolean stopping;
-
-    /** Server endpoint address. */
-    private final String endpointAddr;
-
-    /** Number of open file system sessions. */
-    private final AtomicInteger activeCnt = new AtomicInteger(1);
-
-    /** Event listeners. */
-    private final Collection<HadoopIgfsIpcIoListener> lsnrs =
-        new GridConcurrentHashSet<>();
-
-    /** Cached connections. */
-    private static final ConcurrentMap<String, HadoopIgfsIpcIo> ipcCache =
-        new ConcurrentHashMap8<>();
-
-    /** Striped lock that prevents multiple instance creation in {@link #get(Log, String)}. */
-    private static final GridStripedLock initLock = new GridStripedLock(32);
-
-    /**
-     * @param endpointAddr Endpoint.
-     * @param marsh Protocol.
-     * @param log Logger to use.
-     */
-    public HadoopIgfsIpcIo(String endpointAddr, IgfsMarshaller marsh, Log log) {
-        assert endpointAddr != null;
-        assert marsh != null;
-
-        this.endpointAddr = endpointAddr;
-        this.marsh = marsh;
-        this.log = log;
-    }
-
-    /**
-     * Returns a started and valid instance of this class
-     * for a given endpoint.
-     *
-     * @param log Logger to use for new instance.
-     * @param endpoint Endpoint string.
-     * @return New or existing cached instance, which is started and operational.
-     * @throws IOException If new instance was created but failed to start.
-     */
-    public static HadoopIgfsIpcIo get(Log log, String endpoint) throws IOException {
-        while (true) {
-            HadoopIgfsIpcIo clientIo = ipcCache.get(endpoint);
-
-            if (clientIo != null) {
-                if (clientIo.acquire())
-                    return clientIo;
-                else
-                    // If concurrent close.
-                    ipcCache.remove(endpoint, clientIo);
-            }
-            else {
-                Lock lock = initLock.getLock(endpoint);
-
-                lock.lock();
-
-                try {
-                    clientIo = ipcCache.get(endpoint);
-
-                    if (clientIo != null) { // Perform double check.
-                        if (clientIo.acquire())
-                            return clientIo;
-                        else
-                            // If concurrent close.
-                            ipcCache.remove(endpoint, clientIo);
-                    }
-
-                    // Otherwise try creating a new one.
-                    clientIo = new HadoopIgfsIpcIo(endpoint, new IgfsMarshaller(), log);
-
-                    try {
-                        clientIo.start();
-                    }
-                    catch (IgniteCheckedException e) {
-                        throw new IOException(e.getMessage(), e);
-                    }
-
-                    HadoopIgfsIpcIo old = ipcCache.putIfAbsent(endpoint, clientIo);
-
-                    // Put in exclusive lock.
-                    assert old == null;
-
-                    return clientIo;
-                }
-                finally {
-                    lock.unlock();
-                }
-            }
-        }
-    }
-
-    /**
-     * Increases usage count for this instance.
-     *
-     * @return {@code true} if usage count is greater than zero.
-     */
-    private boolean acquire() {
-        while (true) {
-            int cnt = activeCnt.get();
-
-            if (cnt == 0) {
-                if (log.isDebugEnabled())
-                    log.debug("IPC IO not acquired (count was 0): " + this);
-
-                return false;
-            }
-
-            // Need to make sure that no-one decremented count in between.
-            if (activeCnt.compareAndSet(cnt, cnt + 1)) {
-                if (log.isDebugEnabled())
-                    log.debug("IPC IO acquired: " + this);
-
-                return true;
-            }
-        }
-    }
-
-    /**
-     * Releases this instance, decrementing usage count.
-     * <p>
-     * If usage count becomes zero, the instance is stopped
-     * and removed from cache.
-     */
-    public void release() {
-        while (true) {
-            int cnt = activeCnt.get();
-
-            if (cnt == 0) {
-                if (log.isDebugEnabled())
-                    log.debug("IPC IO not released (count was 0): " + this);
-
-                return;
-            }
-
-            if (activeCnt.compareAndSet(cnt, cnt - 1)) {
-                if (cnt == 1) {
-                    ipcCache.remove(endpointAddr, this);
-
-                    if (log.isDebugEnabled())
-                        log.debug("IPC IO stopping as unused: " + this);
-
-                    stop();
-                }
-                else if (log.isDebugEnabled())
-                    log.debug("IPC IO released: " + this);
-
-                return;
-            }
-        }
-    }
-
-    /**
-     * Closes this IO instance, removing it from cache.
-     */
-    public void forceClose() {
-        if (ipcCache.remove(endpointAddr, this))
-            stop();
-    }
-
-    /**
-     * Starts the IO.
-     *
-     * @throws IgniteCheckedException If failed to connect the endpoint.
-     */
-    private void start() throws IgniteCheckedException {
-        boolean success = false;
-
-        try {
-            endpoint = IpcEndpointFactory.connectEndpoint(
-                endpointAddr, new GridLoggerProxy(new HadoopIgfsJclLogger(log), null, null, ""));
-
-            out = new IgfsDataOutputStream(new BufferedOutputStream(endpoint.outputStream()));
-
-            reader = new ReaderThread();
-
-            // Required for Hadoop 2.x
-            reader.setDaemon(true);
-
-            reader.start();
-
-            success = true;
-        }
-        catch (IgniteCheckedException e) {
-            IpcOutOfSystemResourcesException resEx = e.getCause(IpcOutOfSystemResourcesException.class);
-
-            if (resEx != null)
-                throw new IgniteCheckedException(IpcSharedMemoryServerEndpoint.OUT_OF_RESOURCES_MSG, resEx);
-
-            throw e;
-        }
-        finally {
-            if (!success)
-                stop();
-        }
-    }
-
-    /**
-     * Shuts down the IO. No send requests will be accepted anymore, all pending futures will be failed.
-     * Close listeners will be invoked as if connection is closed by server.
-     */
-    private void stop() {
-        close0(null);
-
-        if (reader != null) {
-            try {
-                U.interrupt(reader);
-                U.join(reader);
-
-                reader = null;
-            }
-            catch (IgniteInterruptedCheckedException ignored) {
-                Thread.currentThread().interrupt();
-
-                log.warn("Got interrupted while waiting for reader thread to shut down (will return).");
-            }
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void addEventListener(HadoopIgfsIpcIoListener lsnr) {
-        if (!busyLock.readLock().tryLock()) {
-            lsnr.onClose();
-
-            return;
-        }
-
-        boolean invokeNow = false;
-
-        try {
-            invokeNow = stopping;
-
-            if (!invokeNow)
-                lsnrs.add(lsnr);
-        }
-        finally {
-            busyLock.readLock().unlock();
-
-            if (invokeNow)
-                lsnr.onClose();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void removeEventListener(HadoopIgfsIpcIoListener lsnr) {
-        lsnrs.remove(lsnr);
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgniteInternalFuture<IgfsMessage> send(IgfsMessage msg) throws IgniteCheckedException {
-        return send(msg, null, 0, 0);
-    }
-
-    /** {@inheritDoc} */
-    @Override public <T> IgniteInternalFuture<T> send(IgfsMessage msg, @Nullable byte[] outBuf, int outOff,
-        int outLen) throws IgniteCheckedException {
-        assert outBuf == null || msg.command() == IgfsIpcCommand.READ_BLOCK;
-
-        if (!busyLock.readLock().tryLock())
-            throw new HadoopIgfsCommunicationException("Failed to send message (client is being concurrently " +
-                "closed).");
-
-        try {
-            if (stopping)
-                throw new HadoopIgfsCommunicationException("Failed to send message (client is being concurrently " +
-                    "closed).");
-
-            long reqId = reqIdCnt.getAndIncrement();
-
-            HadoopIgfsFuture<T> fut = new HadoopIgfsFuture<>();
-
-            fut.outputBuffer(outBuf);
-            fut.outputOffset(outOff);
-            fut.outputLength(outLen);
-            fut.read(msg.command() == IgfsIpcCommand.READ_BLOCK);
-
-            HadoopIgfsFuture oldFut = reqMap.putIfAbsent(reqId, fut);
-
-            assert oldFut == null;
-
-            if (log.isDebugEnabled())
-                log.debug("Sending IGFS message [reqId=" + reqId + ", msg=" + msg + ']');
-
-            byte[] hdr = IgfsMarshaller.createHeader(reqId, msg.command());
-
-            IgniteCheckedException err = null;
-
-            try {
-                synchronized (this) {
-                    marsh.marshall(msg, hdr, out);
-
-                    out.flush(); // Blocking operation + sometimes system call.
-                }
-            }
-            catch (IgniteCheckedException e) {
-                err = e;
-            }
-            catch (IOException e) {
-                err = new HadoopIgfsCommunicationException(e);
-            }
-
-            if (err != null) {
-                reqMap.remove(reqId, fut);
-
-                fut.onDone(err);
-            }
-
-            return fut;
-        }
-        finally {
-            busyLock.readLock().unlock();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void sendPlain(IgfsMessage msg) throws IgniteCheckedException {
-        if (!busyLock.readLock().tryLock())
-            throw new HadoopIgfsCommunicationException("Failed to send message (client is being " +
-                "concurrently closed).");
-
-        try {
-            if (stopping)
-                throw new HadoopIgfsCommunicationException("Failed to send message (client is being concurrently closed).");
-
-            assert msg.command() == IgfsIpcCommand.WRITE_BLOCK;
-
-            IgfsStreamControlRequest req = (IgfsStreamControlRequest)msg;
-
-            byte[] hdr = IgfsMarshaller.createHeader(-1, IgfsIpcCommand.WRITE_BLOCK);
-
-            U.longToBytes(req.streamId(), hdr, 12);
-            U.intToBytes(req.length(), hdr, 20);
-
-            synchronized (this) {
-                out.write(hdr);
-                out.write(req.data(), (int)req.position(), req.length());
-
-                out.flush();
-            }
-        }
-        catch (IOException e) {
-            throw new HadoopIgfsCommunicationException(e);
-        }
-        finally {
-            busyLock.readLock().unlock();
-        }
-    }
-
-    /**
-     * Closes client but does not wait.
-     *
-     * @param err Error.
-     */
-    private void close0(@Nullable Throwable err) {
-        busyLock.writeLock().lock();
-
-        try {
-            if (stopping)
-                return;
-
-            stopping = true;
-        }
-        finally {
-            busyLock.writeLock().unlock();
-        }
-
-        if (err == null)
-            err = new IgniteCheckedException("Failed to perform request (connection was concurrently closed before response " +
-                "is received).");
-
-        // Clean up resources.
-        U.closeQuiet(out);
-
-        if (endpoint != null)
-            endpoint.close();
-
-        // Unwind futures. We can safely iterate here because no more futures will be added.
-        Iterator<HadoopIgfsFuture> it = reqMap.values().iterator();
-
-        while (it.hasNext()) {
-            HadoopIgfsFuture fut = it.next();
-
-            fut.onDone(err);
-
-            it.remove();
-        }
-
-        for (HadoopIgfsIpcIoListener lsnr : lsnrs)
-            lsnr.onClose();
-    }
-
-    /**
-     * Do not extend {@code GridThread} to minimize class dependencies.
-     */
-    private class ReaderThread extends Thread {
-        /** {@inheritDoc} */
-        @SuppressWarnings("unchecked")
-        @Override public void run() {
-            // Error to fail pending futures.
-            Throwable err = null;
-
-            try {
-                InputStream in = endpoint.inputStream();
-
-                IgfsDataInputStream dis = new IgfsDataInputStream(in);
-
-                byte[] hdr = new byte[IgfsMarshaller.HEADER_SIZE];
-                byte[] msgHdr = new byte[IgfsControlResponse.RES_HEADER_SIZE];
-
-                while (!Thread.currentThread().isInterrupted()) {
-                    dis.readFully(hdr);
-
-                    long reqId = U.bytesToLong(hdr, 0);
-
-                    // We don't wait for write responses, therefore reqId is -1.
-                    if (reqId == -1) {
-                        // We received a response which normally should not be sent. It must contain an error.
-                        dis.readFully(msgHdr);
-
-                        assert msgHdr[4] != 0;
-
-                        String errMsg = dis.readUTF();
-
-                        // Error code.
-                        dis.readInt();
-
-                        long streamId = dis.readLong();
-
-                        for (HadoopIgfsIpcIoListener lsnr : lsnrs)
-                            lsnr.onError(streamId, errMsg);
-                    }
-                    else {
-                        HadoopIgfsFuture<Object> fut = reqMap.remove(reqId);
-
-                        if (fut == null) {
-                            String msg = "Failed to read response from server: response closure is unavailable for " +
-                                "requestId (will close connection):" + reqId;
-
-                            log.warn(msg);
-
-                            err = new IgniteCheckedException(msg);
-
-                            break;
-                        }
-                        else {
-                            try {
-                                IgfsIpcCommand cmd = IgfsIpcCommand.valueOf(U.bytesToInt(hdr, 8));
-
-                                if (log.isDebugEnabled())
-                                    log.debug("Received IGFS response [reqId=" + reqId + ", cmd=" + cmd + ']');
-
-                                Object res = null;
-
-                                if (fut.read()) {
-                                    dis.readFully(msgHdr);
-
-                                    boolean hasErr = msgHdr[4] != 0;
-
-                                    if (hasErr) {
-                                        String errMsg = dis.readUTF();
-
-                                        // Error code.
-                                        Integer errCode = dis.readInt();
-
-                                        IgfsControlResponse.throwError(errCode, errMsg);
-                                    }
-
-                                    int blockLen = U.bytesToInt(msgHdr, 5);
-
-                                    int readLen = Math.min(blockLen, fut.outputLength());
-
-                                    if (readLen > 0) {
-                                        assert fut.outputBuffer() != null;
-
-                                        dis.readFully(fut.outputBuffer(), fut.outputOffset(), readLen);
-                                    }
-
-                                    if (readLen != blockLen) {
-                                        byte[] buf = new byte[blockLen - readLen];
-
-                                        dis.readFully(buf);
-
-                                        res = buf;
-                                    }
-                                }
-                                else
-                                    res = marsh.unmarshall(cmd, hdr, dis);
-
-                                fut.onDone(res);
-                            }
-                            catch (IgfsException | IgniteCheckedException e) {
-                                if (log.isDebugEnabled())
-                                    log.debug("Failed to apply response closure (will fail request future): " +
-                                        e.getMessage());
-
-                                fut.onDone(e);
-
-                                err = e;
-                            }
-                            catch (Throwable t) {
-                                fut.onDone(t);
-
-                                throw t;
-                            }
-                        }
-                    }
-                }
-            }
-            catch (EOFException ignored) {
-                err = new IgniteCheckedException("Failed to read response from server (connection was closed by remote peer).");
-            }
-            catch (IOException e) {
-                if (!stopping)
-                    log.error("Failed to read data (connection will be closed)", e);
-
-                err = new HadoopIgfsCommunicationException(e);
-            }
-            catch (Throwable e) {
-                if (!stopping)
-                    log.error("Failed to obtain endpoint input stream (connection will be closed)", e);
-
-                err = e;
-
-                if (e instanceof Error)
-                    throw (Error)e;
-            }
-            finally {
-                close0(err);
-            }
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return getClass().getSimpleName() + " [endpointAddr=" + endpointAddr + ", activeCnt=" + activeCnt +
-            ", stopping=" + stopping + ']';
-    }
-}
\ No newline at end of file
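
The class above reference-counts one shared connection per endpoint string. A typical lifecycle, sketched under the assumption that a server endpoint is listening on "127.0.0.1:10500" (the port is an assumption here; any "host:port" or "shmem:port" string accepted by IpcEndpointFactory works, and 'log' / 'msg' are placeholders for a commons-logging Log and an IgfsMessage):

    HadoopIgfsIpcIo io = HadoopIgfsIpcIo.get(log, "127.0.0.1:10500"); // Creates or reuses a cached instance.

    try {
        io.send(msg).get(); // Request/response over the shared connection.
    }
    finally {
        io.release(); // The last release stops the IO and evicts it from the cache.
    }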

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIoListener.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIoListener.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIoListener.java
deleted file mode 100644
index c26e896..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIoListener.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.igfs;
-
-/**
- * Listens to the events of {@link HadoopIgfsIpcIo}.
- */
-public interface HadoopIgfsIpcIoListener {
-    /**
-     * Callback invoked when the IO is being closed.
-     */
-    public void onClose();
-
-    /**
-     * Callback invoked when remote error occurs.
-     *
-     * @param streamId Stream ID.
-     * @param errMsg Error message.
-     */
-    public void onError(long streamId, String errMsg);
-}
\ No newline at end of file
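
A minimal illustrative implementation of this listener (hypothetical, not part of this commit; 'io' is a placeholder for a HadoopIgfsIo instance) simply logs both callbacks:

    HadoopIgfsIpcIoListener lsnr = new HadoopIgfsIpcIoListener() {
        @Override public void onClose() {
            System.out.println("IGFS IPC connection closed.");
        }

        @Override public void onError(long streamId, String errMsg) {
            System.out.println("Remote error [streamId=" + streamId + ", errMsg=" + errMsg + ']');
        }
    };

    io.addEventListener(lsnr); // Invoked synchronously if the connection is already closed.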

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsJclLogger.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsJclLogger.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsJclLogger.java
deleted file mode 100644
index 3a7f45b..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsJclLogger.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.igfs;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.ignite.IgniteLogger;
-import org.apache.ignite.internal.util.tostring.GridToStringInclude;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * JCL logger wrapper for Hadoop.
- */
-public class HadoopIgfsJclLogger implements IgniteLogger {
-    /** JCL implementation proxy. */
-    @GridToStringInclude
-    private Log impl;
-
-    /**
-     * Constructor.
-     *
-     * @param impl JCL implementation to use.
-     */
-    HadoopIgfsJclLogger(Log impl) {
-        assert impl != null;
-
-        this.impl = impl;
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgniteLogger getLogger(Object ctgr) {
-        return new HadoopIgfsJclLogger(LogFactory.getLog(
-            ctgr instanceof Class ? ((Class)ctgr).getName() : String.valueOf(ctgr)));
-    }
-
-    /** {@inheritDoc} */
-    @Override public void trace(String msg) {
-        impl.trace(msg);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void debug(String msg) {
-        impl.debug(msg);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void info(String msg) {
-        impl.info(msg);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void warning(String msg) {
-        impl.warn(msg);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void warning(String msg, @Nullable Throwable e) {
-        impl.warn(msg, e);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void error(String msg) {
-        impl.error(msg);
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean isQuiet() {
-        return !isInfoEnabled() && !isDebugEnabled();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void error(String msg, @Nullable Throwable e) {
-        impl.error(msg, e);
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean isTraceEnabled() {
-        return impl.isTraceEnabled();
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean isDebugEnabled() {
-        return impl.isDebugEnabled();
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean isInfoEnabled() {
-        return impl.isInfoEnabled();
-    }
-
-    /** {@inheritDoc} */
-    @Nullable @Override public String fileName() {
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopIgfsJclLogger.class, this);
-    }
-}
\ No newline at end of file
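
Since the class above wraps a commons-logging Log into the IgniteLogger interface, obtaining an instance is a one-liner. A sketch, assuming commons-logging is on the classpath (note the constructor is package-private, so this only works from within the same package):

    IgniteLogger log = new HadoopIgfsJclLogger(LogFactory.getLog("igfs-client"));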

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutProc.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutProc.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutProc.java
deleted file mode 100644
index 9902142..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutProc.java
+++ /dev/null
@@ -1,524 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.igfs;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Map;
-import org.apache.commons.logging.Log;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.igfs.IgfsBlockLocation;
-import org.apache.ignite.igfs.IgfsException;
-import org.apache.ignite.igfs.IgfsFile;
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.igfs.IgfsPathSummary;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.igfs.common.IgfsControlResponse;
-import org.apache.ignite.internal.igfs.common.IgfsHandshakeRequest;
-import org.apache.ignite.internal.igfs.common.IgfsMessage;
-import org.apache.ignite.internal.igfs.common.IgfsPathControlRequest;
-import org.apache.ignite.internal.igfs.common.IgfsStatusRequest;
-import org.apache.ignite.internal.igfs.common.IgfsStreamControlRequest;
-import org.apache.ignite.internal.processors.igfs.IgfsHandshakeResponse;
-import org.apache.ignite.internal.processors.igfs.IgfsInputStreamDescriptor;
-import org.apache.ignite.internal.processors.igfs.IgfsStatus;
-import org.apache.ignite.internal.processors.igfs.IgfsUtils;
-import org.apache.ignite.internal.util.future.GridFinishedFuture;
-import org.apache.ignite.internal.util.lang.GridClosureException;
-import org.apache.ignite.lang.IgniteClosure;
-import org.jetbrains.annotations.Nullable;
-import org.jsr166.ConcurrentHashMap8;
-
-import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.AFFINITY;
-import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.CLOSE;
-import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.DELETE;
-import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.INFO;
-import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.LIST_FILES;
-import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.LIST_PATHS;
-import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.MAKE_DIRECTORIES;
-import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.OPEN_APPEND;
-import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.OPEN_CREATE;
-import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.OPEN_READ;
-import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.PATH_SUMMARY;
-import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.READ_BLOCK;
-import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.RENAME;
-import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.SET_TIMES;
-import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.UPDATE;
-import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.WRITE_BLOCK;
-
-/**
- * Communication with external process (TCP or shmem).
- */
-public class HadoopIgfsOutProc implements HadoopIgfsEx, HadoopIgfsIpcIoListener {
-    /** Expected result is boolean. */
-    private static final IgniteClosure<IgniteInternalFuture<IgfsMessage>, Boolean> BOOL_RES = createClosure();
-
-    /** Expected result is boolean. */
-    private static final IgniteClosure<IgniteInternalFuture<IgfsMessage>, Long> LONG_RES = createClosure();
-
-    /** Expected result is {@code IgfsFile}. */
-    private static final IgniteClosure<IgniteInternalFuture<IgfsMessage>, IgfsFile> FILE_RES = createClosure();
-
-    /** Expected result is {@code IgfsHandshakeResponse}. */
-    private static final IgniteClosure<IgniteInternalFuture<IgfsMessage>,
-        IgfsHandshakeResponse> HANDSHAKE_RES = createClosure();
-
-    /** Expected result is {@code IgfsStatus}. */
-    private static final IgniteClosure<IgniteInternalFuture<IgfsMessage>, IgfsStatus> STATUS_RES =
-        createClosure();
-
-    /** Expected result is {@code IgfsInputStreamDescriptor}. */
-    private static final IgniteClosure<IgniteInternalFuture<IgfsMessage>,
-        IgfsInputStreamDescriptor> STREAM_DESCRIPTOR_RES = createClosure();
-
-    /** Expected result is a collection of {@code IgfsFile}. */
-    private static final IgniteClosure<IgniteInternalFuture<IgfsMessage>,
-        Collection<IgfsFile>> FILE_COL_RES = createClosure();
-
-    /** Expected result is a collection of {@code IgfsPath}. */
-    private static final IgniteClosure<IgniteInternalFuture<IgfsMessage>,
-        Collection<IgfsPath>> PATH_COL_RES = createClosure();
-
-    /** Expected result is {@code IgfsPathSummary}. */
-    private static final IgniteClosure<IgniteInternalFuture<IgfsMessage>, IgfsPathSummary> SUMMARY_RES =
-        createClosure();
-
-    /** Expected result is a collection of {@code IgfsBlockLocation}. */
-    private static final IgniteClosure<IgniteInternalFuture<IgfsMessage>,
-        Collection<IgfsBlockLocation>> BLOCK_LOCATION_COL_RES = createClosure();
-
-    /** Grid name. */
-    private final String grid;
-
-    /** IGFS name. */
-    private final String igfs;
-
-    /** The user this out proc is performing on behalf of. */
-    private final String userName;
-
-    /** Client log. */
-    private final Log log;
-
-    /** Client IO. */
-    private final HadoopIgfsIpcIo io;
-
-    /** Event listeners. */
-    private final Map<Long, HadoopIgfsStreamEventListener> lsnrs = new ConcurrentHashMap8<>();
-
-    /**
-     * Constructor for TCP endpoint.
-     *
-     * @param host Host.
-     * @param port Port.
-     * @param grid Grid name.
-     * @param igfs IGFS name.
-     * @param log Client logger.
-     * @param user User name on whose behalf the file system is accessed.
-     * @throws IOException If failed.
-     */
-    public HadoopIgfsOutProc(String host, int port, String grid, String igfs, Log log, String user) throws IOException {
-        this(host, port, grid, igfs, false, log, user);
-    }
-
-    /**
-     * Constructor for shmem endpoint.
-     *
-     * @param port Port.
-     * @param grid Grid name.
-     * @param igfs IGFS name.
-     * @param log Client logger.
-     * @param user User name on whose behalf the file system is accessed.
-     * @throws IOException If failed.
-     */
-    public HadoopIgfsOutProc(int port, String grid, String igfs, Log log, String user) throws IOException {
-        this(null, port, grid, igfs, true, log, user);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param host Host.
-     * @param port Port.
-     * @param grid Grid name.
-     * @param igfs IGFS name.
-     * @param shmem Shared memory flag.
-     * @param log Client logger.
-     * @param user User name on whose behalf the file system is accessed.
-     * @throws IOException If failed.
-     */
-    private HadoopIgfsOutProc(String host, int port, String grid, String igfs, boolean shmem, Log log, String user)
-        throws IOException {
-        assert host != null && !shmem || host == null && shmem :
-            "Invalid arguments [host=" + host + ", port=" + port + ", shmem=" + shmem + ']';
-
-        String endpoint = host != null ? host + ":" + port : "shmem:" + port;
-
-        this.grid = grid;
-        this.igfs = igfs;
-        this.log = log;
-        this.userName = IgfsUtils.fixUserName(user);
-
-        io = HadoopIgfsIpcIo.get(log, endpoint);
-
-        io.addEventListener(this);
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsHandshakeResponse handshake(String logDir) throws IgniteCheckedException {
-        final IgfsHandshakeRequest req = new IgfsHandshakeRequest();
-
-        req.gridName(grid);
-        req.igfsName(igfs);
-        req.logDirectory(logDir);
-
-        return io.send(req).chain(HANDSHAKE_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void close(boolean force) {
-        assert io != null;
-
-        io.removeEventListener(this);
-
-        if (force)
-            io.forceClose();
-        else
-            io.release();
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsFile info(IgfsPath path) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(INFO);
-        msg.path(path);
-        msg.userName(userName);
-
-        return io.send(msg).chain(FILE_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsFile update(IgfsPath path, Map<String, String> props) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(UPDATE);
-        msg.path(path);
-        msg.properties(props);
-        msg.userName(userName);
-
-        return io.send(msg).chain(FILE_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Boolean setTimes(IgfsPath path, long accessTime, long modificationTime) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(SET_TIMES);
-        msg.path(path);
-        msg.accessTime(accessTime);
-        msg.modificationTime(modificationTime);
-        msg.userName(userName);
-
-        return io.send(msg).chain(BOOL_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Boolean rename(IgfsPath src, IgfsPath dest) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(RENAME);
-        msg.path(src);
-        msg.destinationPath(dest);
-        msg.userName(userName);
-
-        return io.send(msg).chain(BOOL_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Boolean delete(IgfsPath path, boolean recursive) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(DELETE);
-        msg.path(path);
-        msg.flag(recursive);
-        msg.userName(userName);
-
-        return io.send(msg).chain(BOOL_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Collection<IgfsBlockLocation> affinity(IgfsPath path, long start, long len)
-        throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(AFFINITY);
-        msg.path(path);
-        msg.start(start);
-        msg.length(len);
-        msg.userName(userName);
-
-        return io.send(msg).chain(BLOCK_LOCATION_COL_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsPathSummary contentSummary(IgfsPath path) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(PATH_SUMMARY);
-        msg.path(path);
-        msg.userName(userName);
-
-        return io.send(msg).chain(SUMMARY_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Boolean mkdirs(IgfsPath path, Map<String, String> props) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(MAKE_DIRECTORIES);
-        msg.path(path);
-        msg.properties(props);
-        msg.userName(userName);
-
-        return io.send(msg).chain(BOOL_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Collection<IgfsFile> listFiles(IgfsPath path) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(LIST_FILES);
-        msg.path(path);
-        msg.userName(userName);
-
-        return io.send(msg).chain(FILE_COL_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Collection<IgfsPath> listPaths(IgfsPath path) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(LIST_PATHS);
-        msg.path(path);
-        msg.userName(userName);
-
-        return io.send(msg).chain(PATH_COL_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsStatus fsStatus() throws IgniteCheckedException {
-        return io.send(new IgfsStatusRequest()).chain(STATUS_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopIgfsStreamDelegate open(IgfsPath path) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(OPEN_READ);
-        msg.path(path);
-        msg.flag(false);
-        msg.userName(userName);
-
-        IgfsInputStreamDescriptor rmtDesc = io.send(msg).chain(STREAM_DESCRIPTOR_RES).get();
-
-        return new HadoopIgfsStreamDelegate(this, rmtDesc.streamId(), rmtDesc.length());
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopIgfsStreamDelegate open(IgfsPath path,
-        int seqReadsBeforePrefetch) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(OPEN_READ);
-        msg.path(path);
-        msg.flag(true);
-        msg.sequentialReadsBeforePrefetch(seqReadsBeforePrefetch);
-        msg.userName(userName);
-
-        IgfsInputStreamDescriptor rmtDesc = io.send(msg).chain(STREAM_DESCRIPTOR_RES).get();
-
-        return new HadoopIgfsStreamDelegate(this, rmtDesc.streamId(), rmtDesc.length());
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopIgfsStreamDelegate create(IgfsPath path, boolean overwrite, boolean colocate,
-        int replication, long blockSize, @Nullable Map<String, String> props) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(OPEN_CREATE);
-        msg.path(path);
-        msg.flag(overwrite);
-        msg.colocate(colocate);
-        msg.properties(props);
-        msg.replication(replication);
-        msg.blockSize(blockSize);
-        msg.userName(userName);
-
-        Long streamId = io.send(msg).chain(LONG_RES).get();
-
-        return new HadoopIgfsStreamDelegate(this, streamId);
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopIgfsStreamDelegate append(IgfsPath path, boolean create,
-        @Nullable Map<String, String> props) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(OPEN_APPEND);
-        msg.path(path);
-        msg.flag(create);
-        msg.properties(props);
-        msg.userName(userName);
-
-        Long streamId = io.send(msg).chain(LONG_RES).get();
-
-        return new HadoopIgfsStreamDelegate(this, streamId);
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgniteInternalFuture<byte[]> readData(HadoopIgfsStreamDelegate desc, long pos, int len,
-        final @Nullable byte[] outBuf, final int outOff, final int outLen) {
-        assert len > 0;
-
-        final IgfsStreamControlRequest msg = new IgfsStreamControlRequest();
-
-        msg.command(READ_BLOCK);
-        msg.streamId((long) desc.target());
-        msg.position(pos);
-        msg.length(len);
-
-        try {
-            return io.send(msg, outBuf, outOff, outLen);
-        }
-        catch (IgniteCheckedException e) {
-            return new GridFinishedFuture<>(e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeData(HadoopIgfsStreamDelegate desc, byte[] data, int off, int len)
-        throws IOException {
-        final IgfsStreamControlRequest msg = new IgfsStreamControlRequest();
-
-        msg.command(WRITE_BLOCK);
-        msg.streamId((long) desc.target());
-        msg.data(data);
-        msg.position(off);
-        msg.length(len);
-
-        try {
-            io.sendPlain(msg);
-        }
-        catch (IgniteCheckedException e) {
-            throw HadoopIgfsUtils.cast(e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void flush(HadoopIgfsStreamDelegate delegate) throws IOException {
-        // No-op.
-    }
-
-    /** {@inheritDoc} */
-    @Override public void closeStream(HadoopIgfsStreamDelegate desc) throws IOException {
-        final IgfsStreamControlRequest msg = new IgfsStreamControlRequest();
-
-        msg.command(CLOSE);
-        msg.streamId((long)desc.target());
-
-        try {
-            io.send(msg).chain(BOOL_RES).get();
-        }
-        catch (IgniteCheckedException e) {
-            throw HadoopIgfsUtils.cast(e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void addEventListener(HadoopIgfsStreamDelegate desc,
-        HadoopIgfsStreamEventListener lsnr) {
-        long streamId = desc.target();
-
-        HadoopIgfsStreamEventListener lsnr0 = lsnrs.put(streamId, lsnr);
-
-        assert lsnr0 == null || lsnr0 == lsnr;
-
-        if (log.isDebugEnabled())
-            log.debug("Added stream event listener [streamId=" + streamId + ']');
-    }
-
-    /** {@inheritDoc} */
-    @Override public void removeEventListener(HadoopIgfsStreamDelegate desc) {
-        long streamId = desc.target();
-
-        HadoopIgfsStreamEventListener lsnr0 = lsnrs.remove(streamId);
-
-        if (lsnr0 != null && log.isDebugEnabled())
-            log.debug("Removed stream event listener [streamId=" + streamId + ']');
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onClose() {
-        for (HadoopIgfsStreamEventListener lsnr : lsnrs.values()) {
-            try {
-                lsnr.onClose();
-            }
-            catch (IgniteCheckedException e) {
-                log.warn("Got exception from stream event listener (will ignore): " + lsnr, e);
-            }
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onError(long streamId, String errMsg) {
-        HadoopIgfsStreamEventListener lsnr = lsnrs.get(streamId);
-
-        if (lsnr != null)
-            lsnr.onError(errMsg);
-        else
-            log.warn("Received write error response for not registered output stream (will ignore) " +
-                "[streamId= " + streamId + ']');
-    }
-
-    /**
-     * Creates conversion closure for given type.
-     *
-     * @param <T> Type of expected result.
-     * @return Conversion closure.
-     */
-    @SuppressWarnings("unchecked")
-    private static <T> IgniteClosure<IgniteInternalFuture<IgfsMessage>, T> createClosure() {
-        return new IgniteClosure<IgniteInternalFuture<IgfsMessage>, T>() {
-            @Override public T apply(IgniteInternalFuture<IgfsMessage> fut) {
-                try {
-                    IgfsControlResponse res = (IgfsControlResponse)fut.get();
-
-                    if (res.hasError())
-                        res.throwError();
-
-                    return (T)res.response();
-                }
-                catch (IgfsException | IgniteCheckedException e) {
-                    throw new GridClosureException(e);
-                }
-            }
-        };
-    }
-
-    /** {@inheritDoc} */
-    @Override public String user() {
-        return userName;
-    }
-}
\ No newline at end of file
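
Every operation above follows the same request/response pattern: populate an
IgfsPathControlRequest, send it through the IPC endpoint, and convert the reply
with a typed closure. A minimal usage sketch, assuming a connected client
instance "hadoop" exposing the methods shown above (all names illustrative):

    IgfsPath path = new IgfsPath("/tmp/demo");

    // Directory creation: one control request, boolean response.
    Boolean created = hadoop.mkdirs(path, Collections.<String, String>emptyMap());

    // Reads go through a delegate that carries the server-side stream ID.
    HadoopIgfsStreamDelegate delegate = hadoop.open(path);

    byte[] buf = new byte[1024];
    hadoop.readData(delegate, 0, buf.length, buf, 0, buf.length).get();

    hadoop.closeStream(delegate);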

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutputStream.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutputStream.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutputStream.java
deleted file mode 100644
index 8f7458b..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutputStream.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.igfs;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import org.apache.commons.logging.Log;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.igfs.common.IgfsLogger;
-import org.jetbrains.annotations.NotNull;
-
-/**
- * IGFS Hadoop output stream implementation.
- */
-public class HadoopIgfsOutputStream extends OutputStream implements HadoopIgfsStreamEventListener {
-    /** Log instance. */
-    private Log log;
-
-    /** Client logger. */
-    private IgfsLogger clientLog;
-
-    /** Log stream ID. */
-    private long logStreamId;
-
-    /** Server stream delegate. */
-    private HadoopIgfsStreamDelegate delegate;
-
-    /** Closed flag. */
-    private volatile boolean closed;
-
-    /** Flag set if stream was closed due to connection breakage. */
-    private boolean connBroken;
-
-    /** Error message. */
-    private volatile String errMsg;
-
-    /** Write time. */
-    private long writeTime;
-
-    /** User time. */
-    private long userTime;
-
-    /** Last timestamp. */
-    private long lastTs;
-
-    /** Amount of written bytes. */
-    private long total;
-
-    /**
-     * Creates light output stream.
-     *
-     * @param delegate Server stream delegate.
-     * @param log Logger to use.
-     * @param clientLog Client logger.
-     * @param logStreamId Log stream ID.
-     */
-    public HadoopIgfsOutputStream(HadoopIgfsStreamDelegate delegate, Log log,
-        IgfsLogger clientLog, long logStreamId) {
-        this.delegate = delegate;
-        this.log = log;
-        this.clientLog = clientLog;
-        this.logStreamId = logStreamId;
-
-        lastTs = System.nanoTime();
-
-        delegate.hadoop().addEventListener(delegate, this);
-    }
-
-    /**
-     * Write start.
-     */
-    private void writeStart() {
-        long now = System.nanoTime();
-
-        userTime += now - lastTs;
-
-        lastTs = now;
-    }
-
-    /**
-     * Write end.
-     */
-    private void writeEnd() {
-        long now = System.nanoTime();
-
-        writeTime += now - lastTs;
-
-        lastTs = now;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void write(@NotNull byte[] b, int off, int len) throws IOException {
-        check();
-
-        writeStart();
-
-        try {
-            delegate.hadoop().writeData(delegate, b, off, len);
-
-            total += len;
-        }
-        finally {
-            writeEnd();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void write(int b) throws IOException {
-        // write(byte[], int, int) already updates 'total'; incrementing again would double-count.
-        write(new byte[] {(byte)b});
-    }
-
-    /** {@inheritDoc} */
-    @Override public void flush() throws IOException {
-        delegate.hadoop().flush(delegate);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void close() throws IOException {
-        if (!closed) {
-            if (log.isDebugEnabled())
-                log.debug("Closing output stream: " + delegate);
-
-            writeStart();
-
-            delegate.hadoop().closeStream(delegate);
-
-            markClosed(false);
-
-            writeEnd();
-
-            if (clientLog.isLogEnabled())
-                clientLog.logCloseOut(logStreamId, userTime, writeTime, total);
-
-            if (log.isDebugEnabled())
-                log.debug("Closed output stream [delegate=" + delegate + ", writeTime=" + writeTime / 1000 +
-                    ", userTime=" + userTime / 1000 + ']');
-        }
-        else if (connBroken)
-            throw new IOException(
-                "Failed to close stream because the connection was broken (data could have been lost).");
-    }
-
-    /**
-     * Marks stream as closed.
-     *
-     * @param connBroken {@code True} if connection with server was lost.
-     */
-    private void markClosed(boolean connBroken) {
-        // It is ok to have race here.
-        if (!closed) {
-            closed = true;
-
-            delegate.hadoop().removeEventListener(delegate);
-
-            this.connBroken = connBroken;
-        }
-    }
-
-    /**
-     * @throws IOException If check failed.
-     */
-    private void check() throws IOException {
-        String errMsg0 = errMsg;
-
-        if (errMsg0 != null)
-            throw new IOException(errMsg0);
-
-        if (closed) {
-            if (connBroken)
-                throw new IOException("Server connection was lost.");
-            else
-                throw new IOException("Stream is closed.");
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onClose() throws IgniteCheckedException {
-        markClosed(true);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onError(String errMsg) {
-        this.errMsg = errMsg;
-    }
-}
\ No newline at end of file
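
The writeStart()/writeEnd() pair above splits wall-clock time into "user" time
(spent between calls into the stream) and "write" time (spent inside the
delegate call) using a single rolling timestamp. The same accounting pattern in
isolation, as a minimal sketch:

    /** Rolling two-bucket timer: elapsed time since the last mark goes to one bucket. */
    class OpTimer {
        private long lastTs = System.nanoTime();

        long userTime; // Time spent outside the measured operation.
        long opTime;   // Time spent inside the measured operation.

        void opStart() { long now = System.nanoTime(); userTime += now - lastTs; lastTs = now; }
        void opEnd()   { long now = System.nanoTime(); opTime   += now - lastTs; lastTs = now; }
    }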

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProperties.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProperties.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProperties.java
deleted file mode 100644
index 90f6bca..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProperties.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.igfs;
-
-import java.util.Map;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.internal.processors.igfs.IgfsUtils;
-
-/**
- * Hadoop file system properties.
- */
-public class HadoopIgfsProperties {
-    /** Username. */
-    private String usrName;
-
-    /** Group name. */
-    private String grpName;
-
-    /** Permissions. */
-    private FsPermission perm;
-
-    /**
-     * Constructor.
-     *
-     * @param props Properties.
-     * @throws IgniteException In case of error.
-     */
-    public HadoopIgfsProperties(Map<String, String> props) throws IgniteException {
-        usrName = props.get(IgfsUtils.PROP_USER_NAME);
-        grpName = props.get(IgfsUtils.PROP_GROUP_NAME);
-
-        String permStr = props.get(IgfsUtils.PROP_PERMISSION);
-
-        if (permStr != null) {
-            try {
-                perm = new FsPermission((short)Integer.parseInt(permStr, 8));
-            }
-            catch (NumberFormatException ignore) {
-                throw new IgniteException("Permissions cannot be parsed: " + permStr);
-            }
-        }
-    }
-
-    /**
-     * Get user name.
-     *
-     * @return User name.
-     */
-    public String userName() {
-        return usrName;
-    }
-
-    /**
-     * Get group name.
-     *
-     * @return Group name.
-     */
-    public String groupName() {
-        return grpName;
-    }
-
-    /**
-     * Get permission.
-     *
-     * @return Permission.
-     */
-    public FsPermission permission() {
-        return perm;
-    }
-}
\ No newline at end of file
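
The permission string is parsed as an octal number, following the POSIX
convention, so "644" maps to rw-r--r--. The same conversion as a standalone
sketch:

    import org.apache.hadoop.fs.permission.FsPermission;

    String permStr = "644";

    // Radix-8 parse, exactly as in the constructor above.
    FsPermission perm = new FsPermission((short)Integer.parseInt(permStr, 8));

    System.out.println(perm); // rw-r--r--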

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyInputStream.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyInputStream.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyInputStream.java
deleted file mode 100644
index 5cee947..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyInputStream.java
+++ /dev/null
@@ -1,337 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.igfs;
-
-import java.io.IOException;
-import java.io.InputStream;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.PositionedReadable;
-import org.apache.hadoop.fs.Seekable;
-import org.apache.ignite.internal.igfs.common.IgfsLogger;
-
-/**
- * Secondary Hadoop file system input stream wrapper.
- */
-public class HadoopIgfsProxyInputStream extends InputStream implements Seekable, PositionedReadable {
-    /** Actual input stream to the secondary file system. */
-    private final FSDataInputStream is;
-
-    /** Client logger. */
-    private final IgfsLogger clientLog;
-
-    /** Log stream ID. */
-    private final long logStreamId;
-
-    /** Read time. */
-    private long readTime;
-
-    /** User time. */
-    private long userTime;
-
-    /** Last timestamp. */
-    private long lastTs;
-
-    /** Amount of read bytes. */
-    private long total;
-
-    /** Closed flag. */
-    private boolean closed;
-
-    /**
-     * Constructor.
-     *
-     * @param is Actual input stream to the secondary file system.
-     * @param clientLog Client log.
-     * @param logStreamId Log stream ID.
-     */
-    public HadoopIgfsProxyInputStream(FSDataInputStream is, IgfsLogger clientLog, long logStreamId) {
-        assert is != null;
-        assert clientLog != null;
-
-        this.is = is;
-        this.clientLog = clientLog;
-        this.logStreamId = logStreamId;
-
-        lastTs = System.nanoTime();
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized int read(byte[] b) throws IOException {
-        readStart();
-
-        int res;
-
-        try {
-            res = is.read(b);
-        }
-        finally {
-            readEnd();
-        }
-
-        if (res != -1)
-            total += res;
-
-        return res;
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized int read(byte[] b, int off, int len) throws IOException {
-        readStart();
-
-        int res;
-
-        try {
-            res = is.read(b, off, len); // Delegate directly; super.read() would double-count via read().
-        }
-        finally {
-            readEnd();
-        }
-
-        if (res != -1)
-            total += res;
-
-        return res;
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized long skip(long n) throws IOException {
-        readStart();
-
-        long res;
-
-        try {
-            res = is.skip(n);
-        }
-        finally {
-            readEnd();
-        }
-
-        if (clientLog.isLogEnabled())
-            clientLog.logSkip(logStreamId, res);
-
-        return res;
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized int available() throws IOException {
-        readStart();
-
-        try {
-            return is.available();
-        }
-        finally {
-            readEnd();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized void close() throws IOException {
-        if (!closed) {
-            closed = true;
-
-            readStart();
-
-            try {
-                is.close();
-            }
-            finally {
-                readEnd();
-            }
-
-            if (clientLog.isLogEnabled())
-                clientLog.logCloseIn(logStreamId, userTime, readTime, total);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized void mark(int readLimit) {
-        readStart();
-
-        try {
-            is.mark(readLimit);
-        }
-        finally {
-            readEnd();
-        }
-
-        if (clientLog.isLogEnabled())
-            clientLog.logMark(logStreamId, readLimit);
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized void reset() throws IOException {
-        readStart();
-
-        try {
-            is.reset();
-        }
-        finally {
-            readEnd();
-        }
-
-        if (clientLog.isLogEnabled())
-            clientLog.logReset(logStreamId);
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized boolean markSupported() {
-        readStart();
-
-        try {
-            return is.markSupported();
-        }
-        finally {
-            readEnd();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized int read() throws IOException {
-        readStart();
-
-        int res;
-
-        try {
-            res = is.read();
-        }
-        finally {
-            readEnd();
-        }
-
-        if (res != -1)
-            total++;
-
-        return res;
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized int read(long pos, byte[] buf, int off, int len) throws IOException {
-        readStart();
-
-        int res;
-
-        try {
-            res = is.read(pos, buf, off, len);
-        }
-        finally {
-            readEnd();
-        }
-
-        if (res != -1)
-            total += res;
-
-        if (clientLog.isLogEnabled())
-            clientLog.logRandomRead(logStreamId, pos, res);
-
-        return res;
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized void readFully(long pos, byte[] buf, int off, int len) throws IOException {
-        readStart();
-
-        try {
-            is.readFully(pos, buf, off, len);
-        }
-        finally {
-            readEnd();
-        }
-
-        total += len;
-
-        if (clientLog.isLogEnabled())
-            clientLog.logRandomRead(logStreamId, pos, len);
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized void readFully(long pos, byte[] buf) throws IOException {
-        readStart();
-
-        try {
-            is.readFully(pos, buf);
-        }
-        finally {
-            readEnd();
-        }
-
-        total += buf.length;
-
-        if (clientLog.isLogEnabled())
-            clientLog.logRandomRead(logStreamId, pos, buf.length);
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized void seek(long pos) throws IOException {
-        readStart();
-
-        try {
-            is.seek(pos);
-        }
-        finally {
-            readEnd();
-        }
-
-        if (clientLog.isLogEnabled())
-            clientLog.logSeek(logStreamId, pos);
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized long getPos() throws IOException {
-        readStart();
-
-        try {
-            return is.getPos();
-        }
-        finally {
-            readEnd();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized boolean seekToNewSource(long targetPos) throws IOException {
-        readStart();
-
-        try {
-            return is.seekToNewSource(targetPos);
-        }
-        finally {
-            readEnd();
-        }
-    }
-
-    /**
-     * Read start.
-     */
-    private void readStart() {
-        long now = System.nanoTime();
-
-        userTime += now - lastTs;
-
-        lastTs = now;
-    }
-
-    /**
-     * Read end.
-     */
-    private void readEnd() {
-        long now = System.nanoTime();
-
-        readTime += now - lastTs;
-
-        lastTs = now;
-    }
-}
\ No newline at end of file
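
A usage sketch for the wrapper above, assuming an already opened stream to the
secondary file system and an IgfsLogger obtained elsewhere ("secondaryFs",
"clientLog" and "logStreamId" are illustrative):

    FSDataInputStream raw = secondaryFs.open(new Path("/data/file"));

    // Wrap the stream so every call is timed and optionally logged.
    InputStream in = new HadoopIgfsProxyInputStream(raw, clientLog, logStreamId);

    byte[] buf = new byte[4096];
    int n = in.read(buf); // Delegates to the secondary FS, accumulating read time.

    in.close(); // Logs user/read time totals if client logging is enabled.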

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyOutputStream.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyOutputStream.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyOutputStream.java
deleted file mode 100644
index eade0f0..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyOutputStream.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.igfs;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.ignite.internal.igfs.common.IgfsLogger;
-
-/**
- * Secondary Hadoop file system output stream wrapper.
- */
-public class HadoopIgfsProxyOutputStream extends OutputStream {
-    /** Actual output stream. */
-    private FSDataOutputStream os;
-
-    /** Client logger. */
-    private final IgfsLogger clientLog;
-
-    /** Log stream ID. */
-    private final long logStreamId;
-
-    /** Write time. */
-    private long writeTime;
-
-    /** User time. */
-    private long userTime;
-
-    /** Last timestamp. */
-    private long lastTs;
-
-    /** Amount of written bytes. */
-    private long total;
-
-    /** Closed flag. */
-    private boolean closed;
-
-    /**
-     * Constructor.
-     *
-     * @param os Actual output stream.
-     * @param clientLog Client logger.
-     * @param logStreamId Log stream ID.
-     */
-    public HadoopIgfsProxyOutputStream(FSDataOutputStream os, IgfsLogger clientLog, long logStreamId) {
-        assert os != null;
-        assert clientLog != null;
-
-        this.os = os;
-        this.clientLog = clientLog;
-        this.logStreamId = logStreamId;
-
-        lastTs = System.nanoTime();
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized void write(int b) throws IOException {
-        writeStart();
-
-        try {
-            os.write(b);
-        }
-        finally {
-            writeEnd();
-        }
-
-        total++;
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized void write(byte[] b) throws IOException {
-        writeStart();
-
-        try {
-            os.write(b);
-        }
-        finally {
-            writeEnd();
-        }
-
-        total += b.length;
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized void write(byte[] b, int off, int len) throws IOException {
-        writeStart();
-
-        try {
-            os.write(b, off, len);
-        }
-        finally {
-            writeEnd();
-        }
-
-        total += len;
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized void flush() throws IOException {
-        writeStart();
-
-        try {
-            os.flush();
-        }
-        finally {
-            writeEnd();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized void close() throws IOException {
-        if (!closed) {
-            closed = true;
-
-            writeStart();
-
-            try {
-                os.close();
-            }
-            finally {
-                writeEnd();
-            }
-
-            if (clientLog.isLogEnabled())
-                clientLog.logCloseOut(logStreamId, userTime, writeTime, total);
-        }
-    }
-
-    /**
-     * Write start.
-     */
-    private void writeStart() {
-        long now = System.nanoTime();
-
-        userTime += now - lastTs;
-
-        lastTs = now;
-    }
-
-    /**
-     * Write end.
-     */
-    private void writeEnd() {
-        long now = System.nanoTime();
-
-        writeTime += now - lastTs;
-
-        lastTs = now;
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsSecondaryFileSystemPositionedReadable.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsSecondaryFileSystemPositionedReadable.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsSecondaryFileSystemPositionedReadable.java
deleted file mode 100644
index a0577ce..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsSecondaryFileSystemPositionedReadable.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.igfs;
-
-import java.io.IOException;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PositionedReadable;
-import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystemPositionedReadable;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-/**
- * Secondary file system input stream wrapper which opens the underlying input stream only when it is explicitly
- * requested.
- * <p>
- * The class is expected to be used only from a synchronized context and therefore is not thread-safe.
- */
-public class HadoopIgfsSecondaryFileSystemPositionedReadable implements IgfsSecondaryFileSystemPositionedReadable {
-    /** Secondary file system. */
-    private final FileSystem fs;
-
-    /** Path to the file to open. */
-    private final Path path;
-
-    /** Buffer size. */
-    private final int bufSize;
-
-    /** Actual input stream. */
-    private FSDataInputStream in;
-
-    /** Cached error that occurred during input stream open. */
-    private IOException err;
-
-    /** Flag indicating that the stream was already opened. */
-    private boolean opened;
-
-    /**
-     * Constructor.
-     *
-     * @param fs Secondary file system.
-     * @param path Path to the file to open.
-     * @param bufSize Buffer size.
-     */
-    public HadoopIgfsSecondaryFileSystemPositionedReadable(FileSystem fs, Path path, int bufSize) {
-        assert fs != null;
-        assert path != null;
-
-        this.fs = fs;
-        this.path = path;
-        this.bufSize = bufSize;
-    }
-
-    /** Get input stream. */
-    private PositionedReadable in() throws IOException {
-        if (opened) {
-            if (err != null)
-                throw err;
-        }
-        else {
-            opened = true;
-
-            try {
-                in = fs.open(path, bufSize);
-
-                if (in == null)
-                    throw new IOException("Failed to open input stream (file system returned null): " + path);
-            }
-            catch (IOException e) {
-                err = e;
-
-                throw err;
-            }
-        }
-
-        return in;
-    }
-
-    /**
-     * Close the wrapped input stream if it was previously opened.
-     */
-    @Override public void close() {
-        U.closeQuiet(in);
-    }
-
-    /** {@inheritDoc} */
-    @Override public int read(long pos, byte[] buf, int off, int len) throws IOException {
-        return in().read(pos, buf, off, len);
-    }
-}
\ No newline at end of file
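
Because the wrapped stream is opened lazily, constructing the readable is
cheap: the secondary file system is touched only on the first read. A minimal
sketch (path and buffer size are illustrative):

    FileSystem fs = FileSystem.get(new Configuration());

    HadoopIgfsSecondaryFileSystemPositionedReadable reader =
        new HadoopIgfsSecondaryFileSystemPositionedReadable(fs, new Path("/data/file"), 64 * 1024);

    byte[] buf = new byte[1024];
    int n = reader.read(0, buf, 0, buf.length); // fs.open() happens here, on first use.

    reader.close(); // Quietly closes the underlying stream if it was opened.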

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamDelegate.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamDelegate.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamDelegate.java
deleted file mode 100644
index 37b58ab..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamDelegate.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.igfs;
-
-import org.apache.ignite.internal.util.typedef.internal.S;
-
-/**
- * IGFS Hadoop stream descriptor.
- */
-public class HadoopIgfsStreamDelegate {
-    /** RPC handler. */
-    private final HadoopIgfsEx hadoop;
-
-    /** Target. */
-    private final Object target;
-
-    /** Optional stream length. */
-    private final long len;
-
-    /**
-     * Constructor.
-     *
-     * @param hadoop RPC handler.
-     * @param target Target.
-     */
-    public HadoopIgfsStreamDelegate(HadoopIgfsEx hadoop, Object target) {
-        this(hadoop, target, -1);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param hadoop RPC handler.
-     * @param target Target.
-     * @param len Optional length.
-     */
-    public HadoopIgfsStreamDelegate(HadoopIgfsEx hadoop, Object target, long len) {
-        assert hadoop != null;
-        assert target != null;
-
-        this.hadoop = hadoop;
-        this.target = target;
-        this.len = len;
-    }
-
-    /**
-     * @return RPC handler.
-     */
-    public HadoopIgfsEx hadoop() {
-        return hadoop;
-    }
-
-    /**
-     * @return Stream target.
-     */
-    @SuppressWarnings("unchecked")
-    public <T> T target() {
-        return (T) target;
-    }
-
-    /**
-     * @return Length.
-     */
-    public long length() {
-        return len;
-    }
-
-    /** {@inheritDoc} */
-    @Override public int hashCode() {
-        return System.identityHashCode(target);
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean equals(Object obj) {
-        return obj != null && obj instanceof HadoopIgfsStreamDelegate &&
-            target == ((HadoopIgfsStreamDelegate)obj).target;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(HadoopIgfsStreamDelegate.class, this);
-    }
-}
\ No newline at end of file
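
Note that equals() and hashCode() above are identity-based on the target, so
two delegates are equal only when they wrap the very same server-side stream
object. A short sketch ("hadoop" is an assumed HadoopIgfsEx instance):

    Long streamId = 42L; // Server-side stream ID.

    HadoopIgfsStreamDelegate d1 = new HadoopIgfsStreamDelegate(hadoop, streamId);
    HadoopIgfsStreamDelegate d2 = new HadoopIgfsStreamDelegate(hadoop, streamId);

    assert d1.equals(d2);            // Same target reference -> equal.
    assert (Long)d1.target() == 42L; // Typed accessor unwraps the target.
    assert d1.length() == -1;        // Length defaults to -1 when not given.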

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamEventListener.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamEventListener.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamEventListener.java
deleted file mode 100644
index d81f765..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamEventListener.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.igfs;
-
-import org.apache.ignite.IgniteCheckedException;
-
-/**
- * IGFS stream event listener.
- */
-public interface HadoopIgfsStreamEventListener {
-    /**
-     * Callback invoked when the stream is being closed.
-     *
-     * @throws IgniteCheckedException If failed.
-     */
-    public void onClose() throws IgniteCheckedException;
-
-    /**
-     * Callback invoked when remote error occurs.
-     *
-     * @param errMsg Error message.
-     */
-    public void onError(String errMsg);
-}
\ No newline at end of file
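
A minimal listener implementation, following the same pattern as the
HadoopIgfsOutputStream above: remember the error and mark the wrapper closed.

    HadoopIgfsStreamEventListener lsnr = new HadoopIgfsStreamEventListener() {
        private volatile String errMsg;

        @Override public void onClose() throws IgniteCheckedException {
            // Server-side stream went away; mark the local wrapper as broken.
        }

        @Override public void onError(String errMsg) {
            this.errMsg = errMsg; // Surfaced on the next I/O call.
        }
    };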


[35/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAbstractSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAbstractSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAbstractSelfTest.java
new file mode 100644
index 0000000..bb155b4
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAbstractSelfTest.java
@@ -0,0 +1,321 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.CacheWriteSynchronizationMode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.FileSystemConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
+import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystem;
+import org.apache.ignite.internal.processors.igfs.IgfsBlockKey;
+import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
+import org.apache.ignite.internal.processors.igfs.IgfsEntryInfo;
+import org.apache.ignite.internal.processors.igfs.IgfsImpl;
+import org.apache.ignite.internal.processors.igfs.IgfsMetaManager;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.jetbrains.annotations.Nullable;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.concurrent.Callable;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.cache.CacheMode.REPLICATED;
+import static org.apache.ignite.igfs.IgfsMode.DUAL_ASYNC;
+import static org.apache.ignite.igfs.IgfsMode.DUAL_SYNC;
+import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH;
+import static org.apache.ignite.internal.processors.igfs.IgfsAbstractSelfTest.awaitFileClose;
+import static org.apache.ignite.internal.processors.igfs.IgfsAbstractSelfTest.clear;
+import static org.apache.ignite.internal.processors.igfs.IgfsAbstractSelfTest.create;
+
+/**
+ * Tests for IGFS working in modes where a remote file system exists: DUAL_SYNC and DUAL_ASYNC.
+ */
+public abstract class HadoopIgfsDualAbstractSelfTest extends IgfsCommonAbstractTest {
+    /** IGFS block size. */
+    protected static final int IGFS_BLOCK_SIZE = 512 * 1024;
+
+    /** Amount of blocks to prefetch. */
+    protected static final int PREFETCH_BLOCKS = 1;
+
+    /** Amount of sequential block reads before prefetch is triggered. */
+    protected static final int SEQ_READS_BEFORE_PREFETCH = 2;
+
+    /** Secondary file system URI. */
+    protected static final String SECONDARY_URI = "igfs://igfs-secondary:grid-secondary@127.0.0.1:11500/";
+
+    /** Secondary file system configuration path. */
+    protected static final String SECONDARY_CFG = "modules/core/src/test/config/hadoop/core-site-loopback-secondary.xml";
+
+    /** Primary file system URI. */
+    protected static final String PRIMARY_URI = "igfs://igfs:grid@/";
+
+    /** Primary file system configuration path. */
+    protected static final String PRIMARY_CFG = "modules/core/src/test/config/hadoop/core-site-loopback.xml";
+
+    /** Primary file system REST endpoint configuration map. */
+    protected static final IgfsIpcEndpointConfiguration PRIMARY_REST_CFG;
+
+    /** Secondary file system REST endpoint configuration map. */
+    protected static final IgfsIpcEndpointConfiguration SECONDARY_REST_CFG;
+
+    /** Directory. */
+    protected static final IgfsPath DIR = new IgfsPath("/dir");
+
+    /** Sub-directory. */
+    protected static final IgfsPath SUBDIR = new IgfsPath(DIR, "subdir");
+
+    /** File. */
+    protected static final IgfsPath FILE = new IgfsPath(SUBDIR, "file");
+
+    /** Default data chunk (128 bytes). */
+    protected static byte[] chunk;
+
+    /** Primary IGFS. */
+    protected static IgfsImpl igfs;
+
+    /** Secondary IGFS. */
+    protected static IgfsImpl igfsSecondary;
+
+    /** IGFS mode. */
+    protected final IgfsMode mode;
+
+    static {
+        PRIMARY_REST_CFG = new IgfsIpcEndpointConfiguration();
+
+        PRIMARY_REST_CFG.setType(IgfsIpcEndpointType.TCP);
+        PRIMARY_REST_CFG.setPort(10500);
+
+        SECONDARY_REST_CFG = new IgfsIpcEndpointConfiguration();
+
+        SECONDARY_REST_CFG.setType(IgfsIpcEndpointType.TCP);
+        SECONDARY_REST_CFG.setPort(11500);
+    }
+
+    /**
+     * Constructor.
+     *
+     * @param mode IGFS mode.
+     */
+    protected HadoopIgfsDualAbstractSelfTest(IgfsMode mode) {
+        this.mode = mode;
+        assert mode == DUAL_SYNC || mode == DUAL_ASYNC;
+    }
+
+    /**
+     * Start grid with IGFS.
+     *
+     * @param gridName Grid name.
+     * @param igfsName IGFS name.
+     * @param mode IGFS mode.
+     * @param secondaryFs Secondary file system (optional).
+     * @param restCfg Rest configuration string (optional).
+     * @return Started grid instance.
+     * @throws Exception If failed.
+     */
+    protected Ignite startGridWithIgfs(String gridName, String igfsName, IgfsMode mode,
+        @Nullable IgfsSecondaryFileSystem secondaryFs, @Nullable IgfsIpcEndpointConfiguration restCfg) throws Exception {
+        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
+
+        igfsCfg.setDataCacheName("dataCache");
+        igfsCfg.setMetaCacheName("metaCache");
+        igfsCfg.setName(igfsName);
+        igfsCfg.setBlockSize(IGFS_BLOCK_SIZE);
+        igfsCfg.setDefaultMode(mode);
+        igfsCfg.setIpcEndpointConfiguration(restCfg);
+        igfsCfg.setSecondaryFileSystem(secondaryFs);
+        igfsCfg.setPrefetchBlocks(PREFETCH_BLOCKS);
+        igfsCfg.setSequentialReadsBeforePrefetch(SEQ_READS_BEFORE_PREFETCH);
+
+        CacheConfiguration dataCacheCfg = defaultCacheConfiguration();
+
+        dataCacheCfg.setName("dataCache");
+        dataCacheCfg.setCacheMode(PARTITIONED);
+        dataCacheCfg.setNearConfiguration(null);
+        dataCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        dataCacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(2));
+        dataCacheCfg.setBackups(0);
+        dataCacheCfg.setAtomicityMode(TRANSACTIONAL);
+        dataCacheCfg.setOffHeapMaxMemory(0);
+
+        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
+
+        metaCacheCfg.setName("metaCache");
+        metaCacheCfg.setCacheMode(REPLICATED);
+        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        IgniteConfiguration cfg = new IgniteConfiguration();
+
+        cfg.setGridName(gridName);
+
+        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+
+        discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
+
+        cfg.setDiscoverySpi(discoSpi);
+        cfg.setCacheConfiguration(dataCacheCfg, metaCacheCfg);
+        cfg.setFileSystemConfiguration(igfsCfg);
+
+        cfg.setLocalHost("127.0.0.1");
+        cfg.setConnectorConfiguration(null);
+
+        return G.start(cfg);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        chunk = new byte[128];
+
+        for (int i = 0; i < chunk.length; i++)
+            chunk[i] = (byte)i;
+
+        Ignite igniteSecondary = startGridWithIgfs("grid-secondary", "igfs-secondary", PRIMARY, null, SECONDARY_REST_CFG);
+
+        IgfsSecondaryFileSystem hadoopFs = new IgniteHadoopIgfsSecondaryFileSystem(SECONDARY_URI, SECONDARY_CFG);
+
+        Ignite ignite = startGridWithIgfs("grid", "igfs", mode, hadoopFs, PRIMARY_REST_CFG);
+
+        igfsSecondary = (IgfsImpl) igniteSecondary.fileSystem("igfs-secondary");
+        igfs = (IgfsImpl) ignite.fileSystem("igfs");
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        clear(igfs);
+        clear(igfsSecondary);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        G.stopAll(true);
+    }
+
+    /**
+     * Convenience method to group paths.
+     *
+     * @param paths Paths to group.
+     * @return Paths as array.
+     */
+    protected IgfsPath[] paths(IgfsPath... paths) {
+        return paths;
+    }
+
+    /**
+     * Check how prefetch override works.
+     *
+     * @throws Exception If failed.
+     */
+    public void testOpenPrefetchOverride() throws Exception {
+        create(igfsSecondary, paths(DIR, SUBDIR), paths(FILE));
+
+        // Write enough data to the secondary file system.
+        final int blockSize = IGFS_BLOCK_SIZE;
+
+        IgfsOutputStream out = igfsSecondary.append(FILE, false);
+
+        int totalWritten = 0;
+
+        while (totalWritten < blockSize * 2 + chunk.length) {
+            out.write(chunk);
+
+            totalWritten += chunk.length;
+        }
+
+        out.close();
+
+        awaitFileClose(igfsSecondary, FILE);
+
+        // Instantiate file system with overridden "seq reads before prefetch" property.
+        Configuration cfg = new Configuration();
+
+        cfg.addResource(U.resolveIgniteUrl(PRIMARY_CFG));
+
+        int seqReads = SEQ_READS_BEFORE_PREFETCH + 1;
+
+        cfg.setInt(String.format(PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, "igfs:grid@"), seqReads);
+
+        FileSystem fs = FileSystem.get(new URI(PRIMARY_URI), cfg);
+
+        // Read the first two blocks.
+        Path fsHome = new Path(PRIMARY_URI);
+        Path dir = new Path(fsHome, DIR.name());
+        Path subdir = new Path(dir, SUBDIR.name());
+        Path file = new Path(subdir, FILE.name());
+
+        FSDataInputStream fsIn = fs.open(file);
+
+        final byte[] readBuf = new byte[blockSize * 2];
+
+        fsIn.readFully(0, readBuf, 0, readBuf.length);
+
+        // Wait for a while for prefetch to finish (if any).
+        IgfsMetaManager meta = igfs.context().meta();
+
+        IgfsEntryInfo info = meta.info(meta.fileId(FILE));
+
+        IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), 2);
+
+        IgniteCache<IgfsBlockKey, byte[]> dataCache = igfs.context().kernalContext().cache().jcache(
+            igfs.configuration().getDataCacheName());
+
+        for (int i = 0; i < 10; i++) {
+            if (dataCache.containsKey(key))
+                break;
+            else
+                U.sleep(100);
+        }
+
+        fsIn.close();
+
+        // Remove the file from the secondary file system.
+        igfsSecondary.delete(FILE, false);
+
+        // Try reading the third block. Should fail.
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                IgfsInputStream in0 = igfs.open(FILE);
+
+                in0.seek(blockSize * 2);
+
+                try {
+                    in0.read(readBuf);
+                }
+                finally {
+                    U.closeQuiet(in0);
+                }
+
+                return null;
+            }
+        }, IOException.class,
+            "Failed to read data due to secondary file system exception: /dir/subdir/file");
+    }
+}
\ No newline at end of file
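
The key step in testOpenPrefetchOverride() is overriding the per-file-system
"sequential reads before prefetch" property through the Hadoop configuration;
the "igfs-name:grid-name@" authority of the IGFS URI is substituted into the
parameter name. In isolation:

    Configuration cfg = new Configuration();

    // Parameter name is templated with the file system authority.
    cfg.setInt(String.format(PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, "igfs:grid@"), 3);

    FileSystem fs = FileSystem.get(new URI("igfs://igfs:grid@/"), cfg);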

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAsyncSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAsyncSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAsyncSelfTest.java
new file mode 100644
index 0000000..6c6e709
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAsyncSelfTest.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.igfs.IgfsMode.DUAL_ASYNC;
+
+/**
+ * Tests for DUAL_ASYNC mode.
+ */
+public class HadoopIgfsDualAsyncSelfTest extends HadoopIgfsDualAbstractSelfTest {
+    /**
+     * Constructor.
+     */
+    public HadoopIgfsDualAsyncSelfTest() {
+        super(DUAL_ASYNC);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualSyncSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualSyncSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualSyncSelfTest.java
new file mode 100644
index 0000000..96a63d5
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualSyncSelfTest.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import static org.apache.ignite.igfs.IgfsMode.DUAL_SYNC;
+
+/**
+ * Tests for DUAL_SYNC mode.
+ */
+public class HadoopIgfsDualSyncSelfTest extends HadoopIgfsDualAbstractSelfTest {
+    /**
+     * Constructor.
+     */
+    public HadoopIgfsDualSyncSelfTest() {
+        super(DUAL_SYNC);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfsSecondaryFileSystemTestAdapter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfsSecondaryFileSystemTestAdapter.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfsSecondaryFileSystemTestAdapter.java
new file mode 100644
index 0000000..f7af6f0
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopIgfsSecondaryFileSystemTestAdapter.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.ignite.configuration.FileSystemConfiguration;
+import org.apache.ignite.hadoop.fs.HadoopFileSystemFactory;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils;
+import org.apache.ignite.internal.processors.igfs.IgfsEx;
+import org.apache.ignite.internal.processors.igfs.IgfsUtils;
+import org.apache.ignite.internal.processors.igfs.IgfsSecondaryFileSystemTestAdapter;
+import org.apache.ignite.internal.util.typedef.T2;
+
+/**
+ * Universal adapter wrapping a {@link org.apache.hadoop.fs.FileSystem} instance.
+ */
+public class HadoopIgfsSecondaryFileSystemTestAdapter implements IgfsSecondaryFileSystemTestAdapter {
+    /** File system factory. */
+    private final HadoopFileSystemFactory factory;
+
+    /**
+     * Constructor.
+     * @param factory File system factory.
+     */
+    public HadoopIgfsSecondaryFileSystemTestAdapter(HadoopFileSystemFactory factory) {
+        assert factory != null;
+
+        this.factory = factory;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String name() throws IOException {
+        return get().getUri().toString();
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean exists(String path) throws IOException {
+        return get().exists(new Path(path));
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean delete(String path, boolean recursive) throws IOException {
+        return get().delete(new Path(path), recursive);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void mkdirs(String path) throws IOException {
+        boolean ok = get().mkdirs(new Path(path));
+
+        if (!ok)
+            throw new IOException("Failed to create directory: " + path);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void format() throws IOException {
+        HadoopIgfsUtils.clear(get());
+    }
+
+    /** {@inheritDoc} */
+    @Override public Map<String, String> properties(String path) throws IOException {
+        Path p = new Path(path);
+
+        FileStatus status = get().getFileStatus(p);
+
+        Map<String, String> m = new HashMap<>(3);
+
+        m.put(IgfsUtils.PROP_USER_NAME, status.getOwner());
+        m.put(IgfsUtils.PROP_GROUP_NAME, status.getGroup());
+        m.put(IgfsUtils.PROP_PERMISSION, permission(status));
+
+        return m;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String permissions(String path) throws IOException {
+        return permission(get().getFileStatus(new Path(path)));
+    }
+
+    /**
+     * Get permission for file status.
+     *
+     * @param status Status.
+     * @return Permission.
+     */
+    private String permission(FileStatus status) {
+        FsPermission perm = status.getPermission();
+
+        return "0" + perm.getUserAction().ordinal() + perm.getGroupAction().ordinal() + perm.getOtherAction().ordinal();
+    }
+
+    /** {@inheritDoc} */
+    @Override public InputStream openInputStream(String path) throws IOException {
+        return get().open(new Path(path));
+    }
+
+    /** {@inheritDoc} */
+    @Override public OutputStream openOutputStream(String path, boolean append) throws IOException {
+        Path p = new Path(path);
+
+        if (append)
+            return get().append(p);
+        else
+            return get().create(p, true/*overwrite*/);
+    }
+
+    /** {@inheritDoc} */
+    @Override public T2<Long, Long> times(String path) throws IOException {
+        FileStatus status = get().getFileStatus(new Path(path));
+
+        return new T2<>(status.getAccessTime(), status.getModificationTime());
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgfsEx igfs() {
+        return null; // Not applicable: this adapter wraps a Hadoop FileSystem, not an IGFS.
+    }
+
+    /**
+     * Create file system.
+     *
+     * @return File system.
+     * @throws IOException If failed.
+     */
+    protected FileSystem get() throws IOException {
+        return factory.get(FileSystemConfiguration.DFLT_USER_NAME);
+    }
+}
\ No newline at end of file
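
A note on the permission(...) helper above: it relies on the fact that
org.apache.hadoop.fs.permission.FsAction declares its constants in the order
NONE..ALL, so ordinal() is exactly the octal digit of the rwx triple. A
minimal standalone sketch of the same encoding, assuming only hadoop-common
on the classpath:

    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class PermissionEncodingSketch {
        /** Encodes a permission as an octal string, e.g. rw-r--r-- -> "0644". */
        static String encode(FsPermission perm) {
            // FsAction.ordinal() yields 0..7, matching the octal rwx digit.
            return "0" + perm.getUserAction().ordinal()
                + perm.getGroupAction().ordinal()
                + perm.getOtherAction().ordinal();
        }

        public static void main(String[] args) {
            System.out.println(encode(new FsPermission((short)0644))); // Prints "0644".
        }
    }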

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopSecondaryFileSystemConfigurationTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopSecondaryFileSystemConfigurationTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopSecondaryFileSystemConfigurationTest.java
new file mode 100644
index 0000000..d9b5d66
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopSecondaryFileSystemConfigurationTest.java
@@ -0,0 +1,575 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.cache.CacheWriteSynchronizationMode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.FileSystemConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.hadoop.fs.CachingHadoopFileSystemFactory;
+import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
+import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils;
+import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.spi.communication.CommunicationSpi;
+import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridTestUtils;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.net.URI;
+import java.util.concurrent.Callable;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.cache.CacheMode.REPLICATED;
+import static org.apache.ignite.events.EventType.EVT_JOB_MAPPED;
+import static org.apache.ignite.events.EventType.EVT_TASK_FAILED;
+import static org.apache.ignite.events.EventType.EVT_TASK_FINISHED;
+import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
+import static org.apache.ignite.igfs.IgfsMode.PROXY;
+import static org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT;
+
+/**
+ * Tests secondary file system configuration.
+ */
+public class HadoopSecondaryFileSystemConfigurationTest extends IgfsCommonAbstractTest {
+    /** IGFS scheme. */
+    static final String IGFS_SCHEME = "igfs";
+
+    /** Primary file system authority. */
+    private static final String PRIMARY_AUTHORITY = "igfs:grid0@";
+
+    /** Autogenerated primary file system configuration path. */
+    private static final String PRIMARY_CFG_PATH = "/work/core-site-primary-test.xml";
+
+    /** Secondary file system authority. */
+    private static final String SECONDARY_AUTHORITY = "igfs_secondary:grid_secondary@127.0.0.1:11500";
+
+    /** Autogenerated secondary file system configuration path. */
+    static final String SECONDARY_CFG_PATH = "/work/core-site-test.xml";
+
+    /** Secondary endpoint configuration. */
+    protected static final IgfsIpcEndpointConfiguration SECONDARY_ENDPOINT_CFG;
+
+    /** Group size. */
+    public static final int GRP_SIZE = 128;
+
+    /** IP finder. */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** Primary file system URI. */
+    protected URI primaryFsUri;
+
+    /** Primary file system. */
+    private FileSystem primaryFs;
+
+    /** Full path of the primary Fs configuration. */
+    private String primaryConfFullPath;
+
+    /** Input primary Fs URI. */
+    private String primaryFsUriStr;
+
+    /** Input primary URI scheme for configuration. */
+    private String primaryCfgScheme;
+
+    /** Input primary URI authority for configuration. */
+    private String primaryCfgAuthority;
+
+    /** Whether to pass the primary configuration. */
+    private boolean passPrimaryConfiguration;
+
+    /** Full path of the secondary Fs configuration. */
+    private String secondaryConfFullPath;
+
+    /** Input secondary Fs URI. */
+    private String secondaryFsUriStr;
+
+    /** Input secondary URI scheme for configuration. */
+    private String secondaryCfgScheme;
+
+    /** Input secondary URI authority for configuration. */
+    private String secondaryCfgAuthority;
+
+    /** Whether to pass the secondary configuration. */
+    private boolean passSecondaryConfiguration;
+
+    /** Default IGFS mode. */
+    protected final IgfsMode mode;
+
+    /** Skip embedded mode flag. */
+    private final boolean skipEmbed;
+
+    /** Skip local shmem flag. */
+    private final boolean skipLocShmem;
+
+    static {
+        SECONDARY_ENDPOINT_CFG = new IgfsIpcEndpointConfiguration();
+
+        SECONDARY_ENDPOINT_CFG.setType(IgfsIpcEndpointType.TCP);
+        SECONDARY_ENDPOINT_CFG.setPort(11500);
+    }
+
+    /**
+     * Constructor.
+     *
+     * @param mode Default IGFS mode.
+     * @param skipEmbed Whether to skip embedded mode.
+     * @param skipLocShmem Whether to skip local shmem mode.
+     */
+    protected HadoopSecondaryFileSystemConfigurationTest(IgfsMode mode, boolean skipEmbed, boolean skipLocShmem) {
+        this.mode = mode;
+        this.skipEmbed = skipEmbed;
+        this.skipLocShmem = skipLocShmem;
+    }
+
+    /**
+     * Default constructor.
+     */
+    public HadoopSecondaryFileSystemConfigurationTest() {
+        this(PROXY, true, false);
+    }
+
+    /**
+     * Executes before each test.
+     *
+     * @throws Exception If failed.
+     */
+    private void before() throws Exception {
+        initSecondary();
+
+        if (passPrimaryConfiguration) {
+            Configuration primaryFsCfg = configuration(primaryCfgScheme, primaryCfgAuthority, skipEmbed, skipLocShmem);
+
+            primaryConfFullPath = writeConfiguration(primaryFsCfg, PRIMARY_CFG_PATH);
+        }
+        else
+            primaryConfFullPath = null;
+
+        CachingHadoopFileSystemFactory fac = new CachingHadoopFileSystemFactory();
+
+        fac.setConfigPaths(primaryConfFullPath);
+        fac.setUri(primaryFsUriStr);
+
+        fac.start();
+
+        primaryFs = fac.get(null);
+
+        primaryFsUri = primaryFs.getUri();
+    }
+
+    /**
+     * Executes after each test.
+     *
+     * @throws Exception If failed.
+     */
+    private void after() throws Exception {
+        if (primaryFs != null) {
+            try {
+                primaryFs.delete(new Path("/"), true);
+            }
+            catch (Exception ignore) {
+                // No-op.
+            }
+
+            U.closeQuiet(primaryFs);
+        }
+
+        G.stopAll(true);
+
+        delete(primaryConfFullPath);
+        delete(secondaryConfFullPath);
+    }
+
+    /**
+     * Utility method to delete file.
+     *
+     * @param file the file path to delete.
+     */
+    @SuppressWarnings("ResultOfMethodCallIgnored")
+    private static void delete(String file) {
+        if (file != null) {
+            new File(file).delete();
+
+            assertFalse(new File(file).exists());
+        }
+    }
+
+    /**
+     * Initializes the underlying secondary file system.
+     *
+     * @throws Exception If failed.
+     */
+    private void initSecondary() throws Exception {
+        if (passSecondaryConfiguration) {
+            Configuration secondaryConf = configuration(secondaryCfgScheme, secondaryCfgAuthority, true, true);
+
+            secondaryConf.setInt("fs.igfs.block.size", 1024);
+
+            secondaryConfFullPath = writeConfiguration(secondaryConf, SECONDARY_CFG_PATH);
+        }
+        else
+            secondaryConfFullPath = null;
+
+        startNodes();
+    }
+
+    /**
+     * Starts the nodes for this test.
+     *
+     * @throws Exception If failed.
+     */
+    private void startNodes() throws Exception {
+        if (mode != PRIMARY)
+            startSecondary();
+
+        startGrids(4);
+    }
+
+    /**
+     * Starts the secondary IGFS.
+     */
+    private void startSecondary() {
+        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
+
+        igfsCfg.setDataCacheName("partitioned");
+        igfsCfg.setMetaCacheName("replicated");
+        igfsCfg.setName("igfs_secondary");
+        igfsCfg.setIpcEndpointConfiguration(SECONDARY_ENDPOINT_CFG);
+        igfsCfg.setBlockSize(512 * 1024);
+        igfsCfg.setPrefetchBlocks(1);
+
+        CacheConfiguration cacheCfg = defaultCacheConfiguration();
+
+        cacheCfg.setName("partitioned");
+        cacheCfg.setCacheMode(PARTITIONED);
+        cacheCfg.setNearConfiguration(null);
+        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(GRP_SIZE));
+        cacheCfg.setBackups(0);
+        cacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
+
+        metaCacheCfg.setName("replicated");
+        metaCacheCfg.setCacheMode(REPLICATED);
+        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        IgniteConfiguration cfg = new IgniteConfiguration();
+
+        cfg.setGridName("grid_secondary");
+
+        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+
+        discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
+
+        cfg.setDiscoverySpi(discoSpi);
+        cfg.setCacheConfiguration(metaCacheCfg, cacheCfg);
+        cfg.setFileSystemConfiguration(igfsCfg);
+        cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED);
+
+        cfg.setCommunicationSpi(communicationSpi());
+
+        G.start(cfg);
+    }
+
+    /**
+     * Get primary IPC endpoint configuration.
+     *
+     * @param gridName Grid name.
+     * @return IPC primary endpoint configuration.
+     */
+    protected IgfsIpcEndpointConfiguration primaryIpcEndpointConfiguration(final String gridName) {
+        IgfsIpcEndpointConfiguration cfg = new IgfsIpcEndpointConfiguration();
+
+        cfg.setType(IgfsIpcEndpointType.TCP);
+        cfg.setPort(DFLT_IPC_PORT + getTestGridIndex(gridName));
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String getTestGridName() {
+        return "grid";
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+
+        discoSpi.setIpFinder(IP_FINDER);
+
+        cfg.setDiscoverySpi(discoSpi);
+        cfg.setCacheConfiguration(cacheConfiguration());
+        cfg.setFileSystemConfiguration(fsConfiguration(gridName));
+        cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED);
+        cfg.setCommunicationSpi(communicationSpi());
+
+        return cfg;
+    }
+
+    /**
+     * Gets cache configuration.
+     *
+     * @return Cache configuration.
+     */
+    protected CacheConfiguration[] cacheConfiguration() {
+        CacheConfiguration cacheCfg = defaultCacheConfiguration();
+
+        cacheCfg.setName("partitioned");
+        cacheCfg.setCacheMode(PARTITIONED);
+        cacheCfg.setNearConfiguration(null);
+        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(GRP_SIZE));
+        cacheCfg.setBackups(0);
+        cacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
+
+        metaCacheCfg.setName("replicated");
+        metaCacheCfg.setCacheMode(REPLICATED);
+        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        return new CacheConfiguration[] {metaCacheCfg, cacheCfg};
+    }
+
+    /**
+     * Gets IGFS configuration.
+     *
+     * @param gridName Grid name.
+     * @return IGFS configuration.
+     * @throws IgniteCheckedException If failed.
+     */
+    protected FileSystemConfiguration fsConfiguration(String gridName) throws IgniteCheckedException {
+        FileSystemConfiguration cfg = new FileSystemConfiguration();
+
+        cfg.setDataCacheName("partitioned");
+        cfg.setMetaCacheName("replicated");
+        cfg.setName("igfs");
+        cfg.setPrefetchBlocks(1);
+        cfg.setDefaultMode(mode);
+
+        if (mode != PRIMARY)
+            cfg.setSecondaryFileSystem(
+                new IgniteHadoopIgfsSecondaryFileSystem(secondaryFsUriStr, secondaryConfFullPath));
+
+        cfg.setIpcEndpointConfiguration(primaryIpcEndpointConfiguration(gridName));
+
+        cfg.setManagementPort(-1);
+        cfg.setBlockSize(512 * 1024); // Together with group blocks mapper will yield 64M per node groups.
+
+        return cfg;
+    }
+
+    /** @return Communication SPI. */
+    private CommunicationSpi communicationSpi() {
+        TcpCommunicationSpi commSpi = new TcpCommunicationSpi();
+
+        commSpi.setSharedMemoryPort(-1);
+
+        return commSpi;
+    }
+
+    /**
+     * Case: SecondaryFileSystemProvider(null, path) - the file system URI is taken from the configuration only.
+     *
+     * @throws Exception On failure.
+     */
+    public void testFsConfigurationOnly() throws Exception {
+        primaryCfgScheme = IGFS_SCHEME;
+        primaryCfgAuthority = PRIMARY_AUTHORITY;
+        passPrimaryConfiguration = true;
+        primaryFsUriStr = null;
+
+        // Correct secondary URI is passed via the configuration; no explicit URI:
+        secondaryCfgScheme = IGFS_SCHEME;
+        secondaryCfgAuthority = SECONDARY_AUTHORITY;
+        passSecondaryConfiguration = true;
+        secondaryFsUriStr = null;
+
+        check();
+    }
+
+    /**
+     * Case #SecondaryFileSystemProvider(uri, path), when 'uri' parameter overrides
+     * the Fs uri set in the configuration.
+     *
+     * @throws Exception On failure.
+     */
+    public void testFsUriOverridesUriInConfiguration() throws Exception {
+        // wrong primary URI in the configuration:
+        primaryCfgScheme = "foo";
+        primaryCfgAuthority = "moo:zoo@bee";
+        passPrimaryConfiguration = true;
+        primaryFsUriStr = mkUri(IGFS_SCHEME, PRIMARY_AUTHORITY);
+
+        // wrong secondary URI in the configuration:
+        secondaryCfgScheme = "foo";
+        secondaryCfgAuthority = "moo:zoo@bee";
+        passSecondaryConfiguration = true;
+        secondaryFsUriStr = mkUri(IGFS_SCHEME, SECONDARY_AUTHORITY);
+
+        check();
+    }
+
+    /**
+     * Perform actual check.
+     *
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("deprecation")
+    private void check() throws Exception {
+        before();
+
+        try {
+            Path fsHome = new Path(primaryFsUri);
+            Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
+            Path file = new Path(dir, "someFile");
+
+            assertPathDoesNotExist(primaryFs, file);
+
+            FsPermission fsPerm = new FsPermission((short)0644); // Octal literal: rw-r--r--.
+
+            FSDataOutputStream os = primaryFs.create(file, fsPerm, false, 1, (short)1, 1L, null);
+
+            // Try to write something in file.
+            os.write("abc".getBytes());
+
+            os.close();
+
+            // Check file status.
+            FileStatus fileStatus = primaryFs.getFileStatus(file);
+
+            assertFalse(fileStatus.isDir());
+            assertEquals(file, fileStatus.getPath());
+            assertEquals(fsPerm, fileStatus.getPermission());
+        }
+        finally {
+            after();
+        }
+    }
+
+    /**
+     * Creates configuration for the test.
+     *
+     * @param scheme URI scheme ({@code null} skips setting the default file system).
+     * @param authority URI authority ({@code null} skips setting the default file system).
+     * @param skipEmbed Whether to skip embedded mode.
+     * @param skipLocShmem Whether to skip local shmem mode.
+     * @return Configuration.
+     */
+    static Configuration configuration(String scheme, String authority, boolean skipEmbed, boolean skipLocShmem) {
+        final Configuration cfg = new Configuration();
+
+        if (scheme != null && authority != null)
+            cfg.set("fs.defaultFS", scheme + "://" + authority + "/");
+
+        setImplClasses(cfg);
+
+        if (authority != null) {
+            if (skipEmbed)
+                cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_EMBED, authority), true);
+
+            if (skipLocShmem)
+                cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM, authority), true);
+        }
+
+        return cfg;
+    }
+
+    /**
+     * Sets Hadoop Fs implementation classes.
+     *
+     * @param cfg the configuration to set parameters into.
+     */
+    static void setImplClasses(Configuration cfg) {
+        cfg.set("fs.igfs.impl", IgniteHadoopFileSystem.class.getName());
+
+        cfg.set("fs.AbstractFileSystem.igfs.impl",
+            org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem.class.getName());
+    }
+
+    /**
+     * Check path does not exist in a given FileSystem.
+     *
+     * @param fs FileSystem to check.
+     * @param path Path to check.
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    private void assertPathDoesNotExist(final FileSystem fs, final Path path) {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.getFileStatus(path);
+            }
+        }, FileNotFoundException.class, null);
+    }
+
+    /**
+     * Writes the configuration to local disk and returns its full path.
+     *
+     * @param cfg Configuration to write.
+     * @param pathFromIgniteHome Path relative to the Ignite home.
+     * @return Full path of the written configuration.
+     * @throws IOException If failed.
+     */
+    static String writeConfiguration(Configuration cfg, String pathFromIgniteHome) throws IOException {
+        if (!pathFromIgniteHome.startsWith("/"))
+            pathFromIgniteHome = "/" + pathFromIgniteHome;
+
+        final String path = U.getIgniteHome() + pathFromIgniteHome;
+
+        delete(path);
+
+        File file = new File(path);
+
+        try (FileOutputStream fos = new FileOutputStream(file)) {
+            cfg.writeXml(fos);
+        }
+
+        assertTrue(file.exists());
+
+        return path;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected long getTestTimeout() {
+        return 3 * 60 * 1000;
+    }
+
+    /**
+     * Makes URI.
+     *
+     * @param scheme The scheme.
+     * @param authority The authority.
+     * @return URI string.
+     */
+    static String mkUri(String scheme, String authority) {
+        return scheme + "://" + authority + "/";
+    }
+}
\ No newline at end of file
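
The writeConfiguration(...) helper above round-trips a Hadoop Configuration
through a core-site style XML file, which CachingHadoopFileSystemFactory then
picks up via setConfigPaths(). A minimal sketch of that round trip, assuming a
writable temp directory; the file name and property value are illustrative:

    import java.io.File;
    import java.io.FileOutputStream;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class ConfigurationRoundTripSketch {
        public static void main(String[] args) throws Exception {
            Configuration cfg = new Configuration();

            cfg.set("fs.defaultFS", "igfs://igfs@/");

            // Serialize the configuration as an XML resource file.
            File file = new File(System.getProperty("java.io.tmpdir"), "core-site-test.xml");

            try (FileOutputStream fos = new FileOutputStream(file)) {
                cfg.writeXml(fos);
            }

            // Load it back without Hadoop defaults and verify the property survived.
            Configuration restored = new Configuration(false);

            restored.addResource(new Path(file.toURI()));

            System.out.println(restored.get("fs.defaultFS")); // Prints igfs://igfs@/
        }
    }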

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgfsEventsTestSuite.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgfsEventsTestSuite.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgfsEventsTestSuite.java
new file mode 100644
index 0000000..a9d7bad
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgfsEventsTestSuite.java
@@ -0,0 +1,285 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import junit.framework.TestSuite;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteFileSystem;
+import org.apache.ignite.configuration.FileSystemConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
+import org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint;
+import org.apache.ignite.internal.util.typedef.G;
+import org.jetbrains.annotations.Nullable;
+
+import static org.apache.ignite.igfs.IgfsMode.DUAL_ASYNC;
+import static org.apache.ignite.igfs.IgfsMode.DUAL_SYNC;
+import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
+
+/**
+ * Test suite for IGFS event tests.
+ */
+@SuppressWarnings("PublicInnerClass")
+public class IgfsEventsTestSuite extends TestSuite {
+    /**
+     * @return Test suite.
+     * @throws Exception Thrown in case of the failure.
+     */
+    public static TestSuite suite() throws Exception {
+        ClassLoader ldr = TestSuite.class.getClassLoader();
+
+        TestSuite suite = new TestSuite("Ignite FS Events Test Suite");
+
+        suite.addTest(new TestSuite(ldr.loadClass(ShmemPrimary.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(ShmemDualSync.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(ShmemDualAsync.class.getName())));
+
+        suite.addTest(new TestSuite(ldr.loadClass(LoopbackPrimary.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(LoopbackDualSync.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(LoopbackDualAsync.class.getName())));
+
+        return suite;
+    }
+
+    /**
+     * @return Test suite with only tests that are supported on all platforms.
+     * @throws Exception Thrown in case of the failure.
+     */
+    public static TestSuite suiteNoarchOnly() throws Exception {
+        ClassLoader ldr = TestSuite.class.getClassLoader();
+
+        TestSuite suite = new TestSuite("Ignite IGFS Events Test Suite Noarch Only");
+
+        suite.addTest(new TestSuite(ldr.loadClass(LoopbackPrimary.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(LoopbackDualSync.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(LoopbackDualAsync.class.getName())));
+
+        return suite;
+    }
+
+    /**
+     * Shared memory IPC in PRIMARY mode.
+     */
+    public static class ShmemPrimary extends IgfsEventsAbstractSelfTest {
+        /** {@inheritDoc} */
+        @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException {
+            FileSystemConfiguration igfsCfg = super.getIgfsConfiguration();
+
+            igfsCfg.setDefaultMode(IgfsMode.PRIMARY);
+
+            IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
+
+            endpointCfg.setType(IgfsIpcEndpointType.SHMEM);
+            endpointCfg.setPort(IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + 1);
+
+            igfsCfg.setIpcEndpointConfiguration(endpointCfg);
+
+            return igfsCfg;
+        }
+    }
+
+    /**
+     * Loopback socket IPC in PRIMARY mode.
+     */
+    public static class LoopbackPrimary extends IgfsEventsAbstractSelfTest {
+        /** {@inheritDoc} */
+        @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException {
+            FileSystemConfiguration igfsCfg = super.getIgfsConfiguration();
+
+            igfsCfg.setDefaultMode(IgfsMode.PRIMARY);
+
+            IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
+
+            endpointCfg.setType(IgfsIpcEndpointType.TCP);
+            endpointCfg.setPort(IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + 1);
+
+            igfsCfg.setIpcEndpointConfiguration(endpointCfg);
+
+            return igfsCfg;
+        }
+    }
+
+    /**
+     * Base class for all IGFS tests with primary and secondary file system.
+     */
+    public abstract static class PrimarySecondaryTest extends IgfsEventsAbstractSelfTest {
+        /** Secondary file system. */
+        private static IgniteFileSystem igfsSec;
+
+        /** {@inheritDoc} */
+        @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException {
+            FileSystemConfiguration igfsCfg = super.getIgfsConfiguration();
+
+            igfsCfg.setSecondaryFileSystem(new IgniteHadoopIgfsSecondaryFileSystem(
+                "igfs://igfs-secondary:grid-secondary@127.0.0.1:11500/",
+                "modules/core/src/test/config/hadoop/core-site-secondary.xml"));
+
+            return igfsCfg;
+        }
+
+        /**
+         * @return IGFS configuration for secondary file system.
+         * @throws IgniteCheckedException If failed.
+         */
+        protected FileSystemConfiguration getSecondaryIgfsConfiguration() throws IgniteCheckedException {
+            FileSystemConfiguration igfsCfg = super.getIgfsConfiguration();
+
+            igfsCfg.setName("igfs-secondary");
+            igfsCfg.setDefaultMode(PRIMARY);
+
+            IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
+
+            endpointCfg.setType(IgfsIpcEndpointType.TCP);
+            endpointCfg.setPort(11500);
+
+            igfsCfg.setIpcEndpointConfiguration(endpointCfg);
+
+            return igfsCfg;
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void beforeTestsStarted() throws Exception {
+            igfsSec = startSecondary();
+
+            super.beforeTestsStarted();
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void afterTestsStopped() throws Exception {
+            super.afterTestsStopped();
+
+            G.stopAll(true);
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void afterTest() throws Exception {
+            super.afterTest();
+
+            // Clean up secondary file system.
+            igfsSec.format();
+        }
+
+        /**
+         * Start a grid with the secondary file system.
+         *
+         * @return Secondary file system handle.
+         * @throws Exception If failed.
+         */
+        @Nullable private IgniteFileSystem startSecondary() throws Exception {
+            IgniteConfiguration cfg = getConfiguration("grid-secondary", getSecondaryIgfsConfiguration());
+
+            cfg.setLocalHost("127.0.0.1");
+            cfg.setPeerClassLoadingEnabled(false);
+
+            Ignite secG = G.start(cfg);
+
+            return secG.fileSystem("igfs-secondary");
+        }
+    }
+
+    /**
+     * Shared memory IPC in DUAL_SYNC mode.
+     */
+    public static class ShmemDualSync extends PrimarySecondaryTest {
+        /** {@inheritDoc} */
+        @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException {
+            FileSystemConfiguration igfsCfg = super.getIgfsConfiguration();
+
+            igfsCfg.setDefaultMode(DUAL_SYNC);
+
+            return igfsCfg;
+        }
+    }
+
+    /**
+     * Shared memory IPC in DUAL_ASYNC mode.
+     */
+    public static class ShmemDualAsync extends PrimarySecondaryTest {
+        /** {@inheritDoc} */
+        @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException {
+            FileSystemConfiguration igfsCfg = super.getIgfsConfiguration();
+
+            igfsCfg.setDefaultMode(DUAL_ASYNC);
+
+            return igfsCfg;
+        }
+    }
+
+    /**
+     * Loopback socket IPC with secondary file system.
+     */
+    public abstract static class LoopbackPrimarySecondaryTest extends PrimarySecondaryTest {
+        /** {@inheritDoc} */
+        @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException {
+            FileSystemConfiguration igfsCfg = super.getIgfsConfiguration();
+
+            igfsCfg.setDefaultMode(IgfsMode.PRIMARY);
+
+            igfsCfg.setSecondaryFileSystem(new IgniteHadoopIgfsSecondaryFileSystem(
+                "igfs://igfs-secondary:grid-secondary@127.0.0.1:11500/",
+                "modules/core/src/test/config/hadoop/core-site-loopback-secondary.xml"));
+
+            return igfsCfg;
+        }
+
+        /** {@inheritDoc} */
+        @Override protected FileSystemConfiguration getSecondaryIgfsConfiguration() throws IgniteCheckedException {
+            FileSystemConfiguration igfsCfg = super.getSecondaryIgfsConfiguration();
+
+            igfsCfg.setName("igfs-secondary");
+            igfsCfg.setDefaultMode(PRIMARY);
+
+            IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
+
+            endpointCfg.setType(IgfsIpcEndpointType.TCP);
+            endpointCfg.setPort(11500);
+
+            igfsCfg.setIpcEndpointConfiguration(endpointCfg);
+
+            return igfsCfg;
+        }
+    }
+
+    /**
+     * Loopback IPC in DUAL_SYNC mode.
+     */
+    public static class LoopbackDualSync extends LoopbackPrimarySecondaryTest {
+        /** {@inheritDoc} */
+        @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException {
+            FileSystemConfiguration igfsCfg = super.getIgfsConfiguration();
+
+            igfsCfg.setDefaultMode(DUAL_SYNC);
+
+            return igfsCfg;
+        }
+    }
+
+    /**
+     * Loopback socket IPC in DUAL_ASYNC mode.
+     */
+    public static class LoopbackDualAsync extends LoopbackPrimarySecondaryTest {
+        /** {@inheritDoc} */
+        @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException {
+            FileSystemConfiguration igfsCfg = super.getIgfsConfiguration();
+
+            igfsCfg.setDefaultMode(DUAL_ASYNC);
+
+            return igfsCfg;
+        }
+    }
+}
\ No newline at end of file
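
The suite above resolves test classes through the TestSuite class loader
rather than referencing them directly; a missing optional class then fails at
suite-construction time instead of at compile time. A minimal sketch of the
pattern; "com.example.MyTest" is a hypothetical JUnit 3 test class:

    import junit.framework.TestSuite;

    public class ReflectiveSuiteSketch {
        /** Builds a suite by loading test classes by name. */
        public static TestSuite suite() throws Exception {
            ClassLoader ldr = TestSuite.class.getClassLoader();

            TestSuite suite = new TestSuite("Example Suite");

            // Each loaded class is wrapped into its own nested TestSuite.
            suite.addTest(new TestSuite(ldr.loadClass("com.example.MyTest")));

            return suite;
        }
    }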

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgfsNearOnlyMultiNodeSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgfsNearOnlyMultiNodeSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgfsNearOnlyMultiNodeSelfTest.java
new file mode 100644
index 0000000..8e79356
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgfsNearOnlyMultiNodeSelfTest.java
@@ -0,0 +1,223 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import java.io.OutputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Collection;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.ignite.cache.CacheWriteSynchronizationMode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.FileSystemConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.configuration.NearCacheConfiguration;
+import org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.events.EventType.EVT_JOB_MAPPED;
+import static org.apache.ignite.events.EventType.EVT_TASK_FAILED;
+import static org.apache.ignite.events.EventType.EVT_TASK_FINISHED;
+
+/**
+ * Tests Hadoop file system implementation.
+ */
+public class IgfsNearOnlyMultiNodeSelfTest extends GridCommonAbstractTest {
+    /** Path to the default hadoop configuration. */
+    public static final String HADOOP_FS_CFG = "examples/config/filesystem/core-site.xml";
+
+    /** Group size. */
+    public static final int GRP_SIZE = 128;
+
+    /** IP finder. */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** Node count. */
+    private int cnt;
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        startGrids(nodeCount());
+
+        grid(0).createNearCache("data", new NearCacheConfiguration());
+
+        grid(0).createNearCache("meta", new NearCacheConfiguration());
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        G.stopAll(true);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        cfg.setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(IP_FINDER).setForceServerMode(true));
+
+        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
+
+        igfsCfg.setDataCacheName("data");
+        igfsCfg.setMetaCacheName("meta");
+        igfsCfg.setName("igfs");
+
+        IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
+
+        endpointCfg.setType(IgfsIpcEndpointType.SHMEM);
+        endpointCfg.setPort(IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + cnt);
+
+        igfsCfg.setIpcEndpointConfiguration(endpointCfg);
+
+        igfsCfg.setBlockSize(512 * 1024); // Together with group blocks mapper will yield 64M per node groups.
+
+        cfg.setFileSystemConfiguration(igfsCfg);
+
+        cfg.setCacheConfiguration(cacheConfiguration(gridName, "data"), cacheConfiguration(gridName, "meta"));
+
+        cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED);
+
+        if (cnt == 0)
+            cfg.setClientMode(true);
+
+        cnt++;
+
+        return cfg;
+    }
+
+    /** @return Node count for test. */
+    protected int nodeCount() {
+        return 4;
+    }
+
+    /**
+     * Gets cache configuration.
+     *
+     * @param gridName Grid name.
+     * @param cacheName Cache name.
+     * @return Cache configuration.
+     */
+    protected CacheConfiguration cacheConfiguration(String gridName, String cacheName) {
+        CacheConfiguration cacheCfg = defaultCacheConfiguration();
+
+        cacheCfg.setName(cacheName);
+        cacheCfg.setCacheMode(PARTITIONED);
+        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(GRP_SIZE));
+        cacheCfg.setBackups(0);
+        cacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        return cacheCfg;
+    }
+
+    /**
+     * Gets configuration of the concrete file system.
+     *
+     * @return File system configuration.
+     */
+    protected Configuration getFileSystemConfig() {
+        Configuration cfg = new Configuration();
+
+        cfg.addResource(U.resolveIgniteUrl(HADOOP_FS_CFG));
+
+        return cfg;
+    }
+
+    /**
+     * Gets the file system URI for the given grid.
+     *
+     * @param grid Grid index.
+     * @return File system URI.
+     */
+    protected URI getFileSystemURI(int grid) {
+        try {
+            return new URI("igfs://127.0.0.1:" + (IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + grid));
+        }
+        catch (URISyntaxException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    /** @throws Exception If failed. */
+    public void testContentsConsistency() throws Exception {
+        try (FileSystem fs = FileSystem.get(getFileSystemURI(0), getFileSystemConfig())) {
+            Collection<IgniteBiTuple<String, Long>> files = F.asList(
+                F.t("/dir1/dir2/file1", 1024L),
+                F.t("/dir1/dir2/file2", 8 * 1024L),
+                F.t("/dir1/file1", 1024 * 1024L),
+                F.t("/dir1/file2", 5 * 1024 * 1024L),
+                F.t("/file1", 64 * 1024L + 13),
+                F.t("/file2", 13L),
+                F.t("/file3", 123764L)
+            );
+
+            for (IgniteBiTuple<String, Long> file : files) {
+
+                info("Writing file: " + file.get1());
+
+                try (OutputStream os = fs.create(new Path(file.get1()), (short)3)) {
+                    byte[] data = new byte[file.get2().intValue()];
+
+                    data[0] = 25;
+                    data[data.length - 1] = 26;
+
+                    os.write(data);
+                }
+
+                info("Finished writing file: " + file.get1());
+            }
+
+            for (int i = 1; i < nodeCount(); i++) {
+
+                // Read the files back through a file system connected to another node.
+                try (FileSystem nodeFs = FileSystem.get(getFileSystemURI(i), getFileSystemConfig())) {
+                    for (IgniteBiTuple<String, Long> file : files) {
+                        Path path = new Path(file.get1());
+
+                        FileStatus fileStatus = nodeFs.getFileStatus(path);
+
+                        assertEquals(file.get2(), (Long)fileStatus.getLen());
+
+                        byte[] read = new byte[file.get2().intValue()];
+
+                        info("Reading file: " + path);
+
+                        try (FSDataInputStream in = nodeFs.open(path)) {
+                            in.readFully(read);
+
+                            assert read[0] == 25;
+                            assert read[read.length - 1] == 26;
+                        }
+
+                        info("Finished reading file: " + path);
+                    }
+                }
+            }
+        }
+    }
+}
\ No newline at end of file
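
In the test above, node 0 is started in client mode and near caches are
created over the IGFS "data" and "meta" caches. A minimal sketch of that
client-side near cache setup, assuming a server node with a "data" cache is
already running; discovery settings are omitted:

    import org.apache.ignite.Ignite;
    import org.apache.ignite.IgniteCache;
    import org.apache.ignite.Ignition;
    import org.apache.ignite.configuration.IgniteConfiguration;
    import org.apache.ignite.configuration.NearCacheConfiguration;

    public class NearCacheClientSketch {
        public static void main(String[] args) {
            IgniteConfiguration cfg = new IgniteConfiguration();

            cfg.setClientMode(true); // Same role as node 0 in the test.

            try (Ignite ignite = Ignition.start(cfg)) {
                // Client-side near cache over the pre-existing "data" cache.
                IgniteCache<Object, Object> cache =
                    ignite.createNearCache("data", new NearCacheConfiguration<>());

                System.out.println("Near cache created: " + cache.getName());
            }
        }
    }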


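The next file gates MapReduce job phases with lock files: beforeTest() creates
them, and the test driver deletes them to let the job advance from setup to
map to reduce. A minimal sketch of that polling pattern; awaitUnlock() is a
hypothetical helper, not part of the commit:

    import java.io.File;

    public class LockFileGateSketch {
        /** Blocks until the given lock file is deleted by another process. */
        static void awaitUnlock(File lockFile) throws InterruptedException {
            while (lockFile.exists())
                Thread.sleep(100); // Poll until the driver deletes the file.
        }

        public static void main(String[] args) throws Exception {
            File mapLock = new File(System.getProperty("java.io.tmpdir"), "ignite-lock-map.file");

            awaitUnlock(mapLock);
        }
    }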
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolSelfTest.java
new file mode 100644
index 0000000..1344e26
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolSelfTest.java
@@ -0,0 +1,654 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.client.hadoop;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
+import java.util.StringTokenizer;
+import java.util.UUID;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.Counters;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
+import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
+import org.apache.ignite.IgniteFileSystem;
+import org.apache.ignite.hadoop.mapreduce.IgniteHadoopClientProtocolProvider;
+import org.apache.ignite.igfs.IgfsFile;
+import org.apache.ignite.igfs.IgfsPath;
+import org.apache.ignite.internal.processors.hadoop.HadoopAbstractSelfTest;
+import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
+import org.apache.ignite.internal.util.lang.GridAbsPredicate;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.testframework.GridTestUtils;
+
+/**
+ * Hadoop client protocol tests in external process mode.
+ */
+@SuppressWarnings("ResultOfMethodCallIgnored")
+public class HadoopClientProtocolSelfTest extends HadoopAbstractSelfTest {
+    /** Input path. */
+    private static final String PATH_INPUT = "/input";
+
+    /** Output path. */
+    private static final String PATH_OUTPUT = "/output";
+
+    /** Job name. */
+    private static final String JOB_NAME = "myJob";
+
+    /** Setup lock file. */
+    private static File setupLockFile = new File(U.isWindows() ? System.getProperty("java.io.tmpdir") : "/tmp",
+        "ignite-lock-setup.file");
+
+    /** Map lock file. */
+    private static File mapLockFile = new File(U.isWindows() ? System.getProperty("java.io.tmpdir") : "/tmp",
+        "ignite-lock-map.file");
+
+    /** Reduce lock file. */
+    private static File reduceLockFile = new File(U.isWindows() ? System.getProperty("java.io.tmpdir") : "/tmp",
+        "ignite-lock-reduce.file");
+
+    /** {@inheritDoc} */
+    @Override protected int gridCount() {
+        return 2;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean igfsEnabled() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean restEnabled() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        super.beforeTestsStarted();
+
+        startGrids(gridCount());
+
+        setupLockFile.delete();
+        mapLockFile.delete();
+        reduceLockFile.delete();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        stopAllGrids();
+
+        super.afterTestsStopped();
+
+//        IgniteHadoopClientProtocolProvider.cliMap.clear();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        setupLockFile.createNewFile();
+        mapLockFile.createNewFile();
+        reduceLockFile.createNewFile();
+
+        setupLockFile.deleteOnExit();
+        mapLockFile.deleteOnExit();
+        reduceLockFile.deleteOnExit();
+
+        super.beforeTest();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        grid(0).fileSystem(HadoopAbstractSelfTest.igfsName).format();
+
+        setupLockFile.delete();
+        mapLockFile.delete();
+        reduceLockFile.delete();
+
+        super.afterTest();
+    }
+
+    /**
+     * Test next job ID generation.
+     *
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("ConstantConditions")
+    private void tstNextJobId() throws Exception {
+        IgniteHadoopClientProtocolProvider provider = provider();
+
+        ClientProtocol proto = provider.create(config(HadoopAbstractSelfTest.REST_PORT));
+
+        JobID jobId = proto.getNewJobID();
+
+        assert jobId != null;
+        assert jobId.getJtIdentifier() != null;
+
+        JobID nextJobId = proto.getNewJobID();
+
+        assert nextJobId != null;
+        assert nextJobId.getJtIdentifier() != null;
+
+        assert !F.eq(jobId, nextJobId);
+    }
+
+    /**
+     * Tests job counters retrieval.
+     *
+     * @throws Exception If failed.
+     */
+    public void testJobCounters() throws Exception {
+        IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);
+
+        igfs.mkdirs(new IgfsPath(PATH_INPUT));
+
+        try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create(
+            new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
+
+            bw.write(
+                "alpha\n" +
+                "beta\n" +
+                "gamma\n" +
+                "alpha\n" +
+                "beta\n" +
+                "gamma\n" +
+                "alpha\n" +
+                "beta\n" +
+                "gamma\n"
+            );
+        }
+
+        Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);
+
+        final Job job = Job.getInstance(conf);
+
+        job.setOutputKeyClass(Text.class);
+        job.setOutputValueClass(IntWritable.class);
+
+        job.setMapperClass(TestCountingMapper.class);
+        job.setReducerClass(TestCountingReducer.class);
+        job.setCombinerClass(TestCountingCombiner.class);
+
+        FileInputFormat.setInputPaths(job, new Path(PATH_INPUT));
+        FileOutputFormat.setOutputPath(job, new Path(PATH_OUTPUT));
+
+        job.submit();
+
+        final Counter cntr = job.getCounters().findCounter(TestCounter.COUNTER1);
+
+        assertEquals(0, cntr.getValue());
+
+        cntr.increment(10);
+
+        assertEquals(10, cntr.getValue());
+
+        // Transferring to map phase.
+        setupLockFile.delete();
+
+        // Transferring to reduce phase.
+        mapLockFile.delete();
+
+        job.waitForCompletion(false);
+
+        assertEquals("job must end successfully", JobStatus.State.SUCCEEDED, job.getStatus().getState());
+
+        final Counters counters = job.getCounters();
+
+        assertNotNull("counters cannot be null", counters);
+        assertEquals("wrong counters count", 3, counters.countCounters());
+        assertEquals("wrong counter value", 15, counters.findCounter(TestCounter.COUNTER1).getValue());
+        assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER2).getValue());
+        assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER3).getValue());
+    }
+
+    /**
+     * Tests job counters retrieval for unknown job id.
+     *
+     * @throws Exception If failed.
+     */
+    private void tstUnknownJobCounters() throws Exception {
+        IgniteHadoopClientProtocolProvider provider = provider();
+
+        ClientProtocol proto = provider.create(config(HadoopAbstractSelfTest.REST_PORT));
+
+        try {
+            proto.getJobCounters(new JobID(UUID.randomUUID().toString(), -1));
+            fail("exception must be thrown");
+        }
+        catch (Exception e) {
+            assert e instanceof IOException : "wrong error has been thrown";
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    private void tstJobSubmitMap() throws Exception {
+        checkJobSubmit(true, true);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    private void tstJobSubmitMapCombine() throws Exception {
+        checkJobSubmit(false, true);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    private void tstJobSubmitMapReduce() throws Exception {
+        checkJobSubmit(true, false);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    private void tstJobSubmitMapCombineReduce() throws Exception {
+        checkJobSubmit(false, false);
+    }
+
+    /**
+     * Test job submission.
+     *
+     * @param noCombiners Whether there are no combiners.
+     * @param noReducers Whether there are no reducers.
+     * @throws Exception If failed.
+     */
+    public void checkJobSubmit(boolean noCombiners, boolean noReducers) throws Exception {
+        IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);
+
+        igfs.mkdirs(new IgfsPath(PATH_INPUT));
+
+        try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create(
+            new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
+
+            bw.write("word");
+        }
+
+        Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);
+
+        final Job job = Job.getInstance(conf);
+
+        job.setJobName(JOB_NAME);
+
+        job.setOutputKeyClass(Text.class);
+        job.setOutputValueClass(IntWritable.class);
+
+        job.setMapperClass(TestMapper.class);
+        job.setReducerClass(TestReducer.class);
+
+        if (!noCombiners)
+            job.setCombinerClass(TestCombiner.class);
+
+        if (noReducers)
+            job.setNumReduceTasks(0);
+
+        job.setInputFormatClass(TextInputFormat.class);
+        job.setOutputFormatClass(TestOutputFormat.class);
+
+        FileInputFormat.setInputPaths(job, new Path(PATH_INPUT));
+        FileOutputFormat.setOutputPath(job, new Path(PATH_OUTPUT));
+
+        job.submit();
+
+        JobID jobId = job.getJobID();
+
+        // Setup phase.
+        JobStatus jobStatus = job.getStatus();
+        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
+        assert jobStatus.getSetupProgress() >= 0.0f && jobStatus.getSetupProgress() < 1.0f;
+        assert jobStatus.getMapProgress() == 0.0f;
+        assert jobStatus.getReduceProgress() == 0.0f;
+
+        U.sleep(2100);
+
+        JobStatus recentJobStatus = job.getStatus();
+
+        assert recentJobStatus.getSetupProgress() > jobStatus.getSetupProgress() :
+            "Old=" + jobStatus.getSetupProgress() + ", new=" + recentJobStatus.getSetupProgress();
+
+        // Transferring to map phase.
+        setupLockFile.delete();
+
+        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
+            @Override public boolean apply() {
+                try {
+                    return F.eq(1.0f, job.getStatus().getSetupProgress());
+                }
+                catch (Exception e) {
+                    throw new RuntimeException("Unexpected exception.", e);
+                }
+            }
+        }, 5000L);
+
+        // Map phase.
+        jobStatus = job.getStatus();
+        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
+        assert jobStatus.getSetupProgress() == 1.0f;
+        assert jobStatus.getMapProgress() >= 0.0f && jobStatus.getMapProgress() < 1.0f;
+        assert jobStatus.getReduceProgress() == 0.0f;
+
+        U.sleep(2100);
+
+        recentJobStatus = job.getStatus();
+
+        assert recentJobStatus.getMapProgress() > jobStatus.getMapProgress() :
+            "Old=" + jobStatus.getMapProgress() + ", new=" + recentJobStatus.getMapProgress();
+
+        // Transferring to reduce phase.
+        mapLockFile.delete();
+
+        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
+            @Override public boolean apply() {
+                try {
+                    return F.eq(1.0f, job.getStatus().getMapProgress());
+                }
+                catch (Exception e) {
+                    throw new RuntimeException("Unexpected exception.", e);
+                }
+            }
+        }, 5000L);
+
+        if (!noReducers) {
+            // Reduce phase.
+            jobStatus = job.getStatus();
+            checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
+            assert jobStatus.getSetupProgress() == 1.0f;
+            assert jobStatus.getMapProgress() == 1.0f;
+            assert jobStatus.getReduceProgress() >= 0.0f && jobStatus.getReduceProgress() < 1.0f;
+
+            // Ensure that reduce progress increases.
+            U.sleep(2100);
+
+            recentJobStatus = job.getStatus();
+
+            assert recentJobStatus.getReduceProgress() > jobStatus.getReduceProgress() :
+                "Old=" + jobStatus.getReduceProgress() + ", new=" + recentJobStatus.getReduceProgress();
+
+            reduceLockFile.delete();
+        }
+
+        job.waitForCompletion(false);
+
+        jobStatus = job.getStatus();
+        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.SUCCEEDED, 1.0f);
+        assert jobStatus.getSetupProgress() == 1.0f;
+        assert jobStatus.getMapProgress() == 1.0f;
+        assert jobStatus.getReduceProgress() == 1.0f;
+
+        dumpIgfs(igfs, new IgfsPath(PATH_OUTPUT));
+    }
+
+    /**
+     * Dump IGFS content.
+     *
+     * @param igfs IGFS.
+     * @param path Path.
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("ConstantConditions")
+    private static void dumpIgfs(IgniteFileSystem igfs, IgfsPath path) throws Exception {
+        IgfsFile file = igfs.info(path);
+
+        assert file != null;
+
+        System.out.println(file.path());
+
+        if (file.isDirectory()) {
+            for (IgfsPath child : igfs.listPaths(path))
+                dumpIgfs(igfs, child);
+        }
+        else {
+            try (BufferedReader br = new BufferedReader(new InputStreamReader(igfs.open(path)))) {
+                String line = br.readLine();
+
+                while (line != null) {
+                    System.out.println(line);
+
+                    line = br.readLine();
+                }
+            }
+        }
+    }
+
+    /**
+     * Check job status.
+     *
+     * @param status Job status.
+     * @param expJobId Expected job ID.
+     * @param expJobName Expected job name.
+     * @param expState Expected state.
+     * @param expCleanupProgress Expected cleanup progress.
+     * @throws Exception If failed.
+     */
+    private static void checkJobStatus(JobStatus status, JobID expJobId, String expJobName,
+        JobStatus.State expState, float expCleanupProgress) throws Exception {
+        assert F.eq(status.getJobID(), expJobId) : "Expected=" + expJobId + ", actual=" + status.getJobID();
+        assert F.eq(status.getJobName(), expJobName) : "Expected=" + expJobName + ", actual=" + status.getJobName();
+        assert F.eq(status.getState(), expState) : "Expected=" + expState + ", actual=" + status.getState();
+        assert F.eq(status.getCleanupProgress(), expCleanupProgress) :
+            "Expected=" + expCleanupProgress + ", actual=" + status.getCleanupProgress();
+    }
+
+    /**
+     * @param port REST port.
+     * @return Configuration.
+     */
+    private Configuration config(int port) {
+        Configuration conf = HadoopUtils.safeCreateConfiguration();
+
+        setupFileSystems(conf);
+
+        conf.set(MRConfig.FRAMEWORK_NAME, IgniteHadoopClientProtocolProvider.FRAMEWORK_NAME);
+        conf.set(MRConfig.MASTER_ADDRESS, "127.0.0.1:" + port);
+
+        conf.set("fs.defaultFS", "igfs://:" + getTestGridName(0) + "@/");
+
+        return conf;
+    }
+
+    /**
+     * @return Protocol provider.
+     */
+    private IgniteHadoopClientProtocolProvider provider() {
+        return new IgniteHadoopClientProtocolProvider();
+    }
+
+    /**
+     * Test mapper.
+     */
+    public static class TestMapper extends Mapper<Object, Text, Text, IntWritable> {
+        /** Writable container for the current word. */
+        private Text word = new Text();
+
+        /** Writable integer constant of '1', written as the count for each word found. */
+        private static final IntWritable one = new IntWritable(1);
+
+        /** {@inheritDoc} */
+        @Override public void map(Object key, Text val, Context ctx) throws IOException, InterruptedException {
+            while (mapLockFile.exists())
+                Thread.sleep(50);
+
+            StringTokenizer wordList = new StringTokenizer(val.toString());
+
+            while (wordList.hasMoreTokens()) {
+                word.set(wordList.nextToken());
+
+                ctx.write(word, one);
+            }
+        }
+    }
+
+    /**
+     * Test Hadoop counters.
+     */
+    public enum TestCounter {
+        COUNTER1, COUNTER2, COUNTER3
+    }
+
+    /**
+     * Test mapper that uses counters.
+     */
+    public static class TestCountingMapper extends TestMapper {
+        /** {@inheritDoc} */
+        @Override public void map(Object key, Text val, Context ctx) throws IOException, InterruptedException {
+            super.map(key, val, ctx);
+            ctx.getCounter(TestCounter.COUNTER1).increment(1);
+        }
+    }
+
+    /**
+     * Test combiner that counts invocations.
+     */
+    public static class TestCountingCombiner extends TestReducer {
+        /** {@inheritDoc} */
+        @Override public void reduce(Text key, Iterable<IntWritable> values,
+            Context ctx) throws IOException, InterruptedException {
+            ctx.getCounter(TestCounter.COUNTER1).increment(1);
+            ctx.getCounter(TestCounter.COUNTER2).increment(1);
+
+            int sum = 0;
+            for (IntWritable value : values)
+                sum += value.get();
+
+            ctx.write(key, new IntWritable(sum));
+        }
+    }
+
+    /**
+     * Test reducer that counts invocations.
+     */
+    public static class TestCountingReducer extends TestReducer {
+        /** {@inheritDoc} */
+        @Override public void reduce(Text key, Iterable<IntWritable> values,
+            Context ctx) throws IOException, InterruptedException {
+            ctx.getCounter(TestCounter.COUNTER1).increment(1);
+            ctx.getCounter(TestCounter.COUNTER3).increment(1);
+        }
+    }
+
+    /**
+     * Test combiner.
+     */
+    public static class TestCombiner extends Reducer<Text, IntWritable, Text, IntWritable> {
+        // No-op.
+    }
+
+    /**
+     * Test output format.
+     */
+    public static class TestOutputFormat<K, V> extends TextOutputFormat<K, V> {
+        /** {@inheritDoc} */
+        @Override public synchronized OutputCommitter getOutputCommitter(TaskAttemptContext ctx)
+            throws IOException {
+            return new TestOutputCommitter(ctx, (FileOutputCommitter)super.getOutputCommitter(ctx));
+        }
+    }
+
+    /**
+     * Test output committer.
+     */
+    private static class TestOutputCommitter extends FileOutputCommitter {
+        /** Delegate. */
+        private final FileOutputCommitter delegate;
+
+        /**
+         * Constructor.
+         *
+         * @param ctx Task attempt context.
+         * @param delegate Delegate.
+         * @throws IOException If failed.
+         */
+        private TestOutputCommitter(TaskAttemptContext ctx, FileOutputCommitter delegate) throws IOException {
+            super(FileOutputFormat.getOutputPath(ctx), ctx);
+
+            this.delegate = delegate;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void setupJob(JobContext jobCtx) throws IOException {
+            try {
+                while (setupLockFile.exists())
+                    Thread.sleep(50);
+            }
+            catch (InterruptedException ignored) {
+                throw new IOException("Interrupted.");
+            }
+
+            delegate.setupJob(jobCtx);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void setupTask(TaskAttemptContext taskCtx) throws IOException {
+            delegate.setupTask(taskCtx);
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean needsTaskCommit(TaskAttemptContext taskCtx) throws IOException {
+            return delegate.needsTaskCommit(taskCtx);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void commitTask(TaskAttemptContext taskCtx) throws IOException {
+            delegate.commitTask(taskCtx);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void abortTask(TaskAttemptContext taskCtx) throws IOException {
+            delegate.abortTask(taskCtx);
+        }
+    }
+
+    /**
+     * Test reducer.
+     */
+    public static class TestReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
+        /** Writable container for the total word count. */
+        private IntWritable totalWordCnt = new IntWritable();
+
+        /** {@inheritDoc} */
+        @Override public void reduce(Text key, Iterable<IntWritable> values, Context ctx) throws IOException,
+            InterruptedException {
+            while (reduceLockFile.exists())
+                Thread.sleep(50);
+
+            int wordCnt = 0;
+
+            for (IntWritable value : values)
+                wordCnt += value.get();
+
+            totalWordCnt.set(wordCnt);
+
+            ctx.write(key, totalWordCnt);
+        }
+    }
+}
\ No newline at end of file
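
The test above drives a job through the setup, map and reduce phases and asserts that the reported progress grows monotonically. For context, below is a minimal sketch of the same submit-and-poll pattern from a plain Hadoop client routed to Ignite; it mirrors the framework name and master address set in config(), while the host, port and elided job setup are illustrative assumptions, not part of this commit.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.JobStatus;
    import org.apache.hadoop.mapreduce.MRConfig;

    public class ProgressPollSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();

            // Route submission to Ignite instead of local/YARN (same keys as config() above).
            conf.set(MRConfig.FRAMEWORK_NAME, "ignite");
            conf.set(MRConfig.MASTER_ADDRESS, "127.0.0.1:11211"); // assumed REST endpoint

            Job job = Job.getInstance(conf);

            // Mapper/reducer/input/output setup elided; see checkJobSubmit() above.

            job.submit();

            while (!job.isComplete()) {
                JobStatus status = job.getStatus();

                System.out.printf("setup=%.2f map=%.2f reduce=%.2f%n",
                    status.getSetupProgress(), status.getMapProgress(), status.getReduceProgress());

                Thread.sleep(1000L);
            }
        }
    }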

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/hadoop/cache/HadoopTxConfigCacheTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/hadoop/cache/HadoopTxConfigCacheTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/hadoop/cache/HadoopTxConfigCacheTest.java
new file mode 100644
index 0000000..6f910f1
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/hadoop/cache/HadoopTxConfigCacheTest.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.cache;
+
+import org.apache.ignite.Ignite;
+import org.apache.ignite.internal.processors.cache.IgniteInternalCache;
+import org.apache.ignite.internal.processors.cache.IgniteTxConfigCacheSelfTest;
+import org.apache.ignite.internal.util.typedef.internal.CU;
+
+/**
+ * Checks that the Hadoop system cache does not use the user-defined TX configuration.
+ */
+public class HadoopTxConfigCacheTest extends IgniteTxConfigCacheSelfTest {
+    /**
+     * Test passes if system cache transactions do not time out.
+     *
+     * @throws Exception If failed.
+     */
+    public void testSystemCacheTx() throws Exception {
+        final Ignite ignite = grid(0);
+
+        final IgniteInternalCache<Object, Object> hadoopCache = getSystemCache(ignite, CU.SYS_CACHE_HADOOP_MR);
+
+        checkImplicitTxSuccess(hadoopCache);
+        checkStartTxSuccess(hadoopCache);
+    }
+}
\ No newline at end of file
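
A hedged sketch of the user-side scenario this test guards against: even with aggressive user-defined transaction defaults, the internal Hadoop system cache is expected to keep working with its own settings. The timeout value and node setup below are illustrative only; the system-cache checks themselves rely on internal test helpers (getSystemCache, checkImplicitTxSuccess) shown above.

    import org.apache.ignite.Ignite;
    import org.apache.ignite.Ignition;
    import org.apache.ignite.configuration.IgniteConfiguration;
    import org.apache.ignite.configuration.TransactionConfiguration;

    public class TxDefaultsSketch {
        public static void main(String[] args) {
            TransactionConfiguration txCfg = new TransactionConfiguration();

            // Deliberately short default timeout for user transactions.
            txCfg.setDefaultTxTimeout(500L);

            IgniteConfiguration cfg = new IgniteConfiguration();

            cfg.setTransactionConfiguration(txCfg);

            try (Ignite ignored = Ignition.start(cfg)) {
                // User transactions on this node inherit the 500 ms default;
                // internal system caches (e.g. the Hadoop MR cache) must not.
            }
        }
    }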

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/hadoop/fs/KerberosHadoopFileSystemFactorySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/hadoop/fs/KerberosHadoopFileSystemFactorySelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/hadoop/fs/KerberosHadoopFileSystemFactorySelfTest.java
new file mode 100644
index 0000000..ea7fa99
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/hadoop/fs/KerberosHadoopFileSystemFactorySelfTest.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.fs;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.ObjectInput;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutput;
+import java.io.ObjectOutputStream;
+import java.util.concurrent.Callable;
+
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.junit.Assert;
+
+/**
+ * Tests KerberosHadoopFileSystemFactory.
+ */
+public class KerberosHadoopFileSystemFactorySelfTest extends GridCommonAbstractTest {
+    /**
+     * Test parameters validation.
+     *
+     * @throws Exception If failed.
+     */
+    public void testParameters() throws Exception {
+        checkParameters(null, null, -1);
+
+        checkParameters(null, null, 100);
+        checkParameters(null, "b", -1);
+        checkParameters("a", null, -1);
+
+        checkParameters(null, "b", 100);
+        checkParameters("a", null, 100);
+        checkParameters("a", "b", -1);
+    }
+
+    /**
+     * Checks that the given invalid parameter combination makes factory start fail.
+     *
+     * @param keyTab Key tab.
+     * @param keyTabPrincipal Key tab principal.
+     * @param reloginInterval Re-login interval.
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    private void checkParameters(String keyTab, String keyTabPrincipal, long reloginInterval) {
+        final KerberosHadoopFileSystemFactory fac = new KerberosHadoopFileSystemFactory();
+
+        fac.setKeyTab(keyTab);
+        fac.setKeyTabPrincipal(keyTabPrincipal);
+        fac.setReloginInterval(reloginInterval);
+
+        GridTestUtils.assertThrows(null, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                fac.start();
+
+                return null;
+            }
+        }, IllegalArgumentException.class, null);
+    }
+
+    /**
+     * Checks serialization and deserialization of the secure factory.
+     *
+     * @throws Exception If failed.
+     */
+    public void testSerialization() throws Exception {
+        KerberosHadoopFileSystemFactory fac = new KerberosHadoopFileSystemFactory();
+
+        checkSerialization(fac);
+
+        fac = new KerberosHadoopFileSystemFactory();
+
+        fac.setUri("igfs://igfs@localhost:10500/");
+        fac.setConfigPaths("/a/core-site.xml", "/b/mapred-site.xml");
+        fac.setKeyTabPrincipal("foo");
+        fac.setKeyTab("/etc/krb5.keytab");
+        fac.setReloginInterval(30 * 60 * 1000L);
+
+        checkSerialization(fac);
+    }
+
+    /**
+     * Serializes and deserializes the factory, then checks the copy against the original.
+     *
+     * @param fac The factory to check.
+     * @throws Exception If failed.
+     */
+    private void checkSerialization(KerberosHadoopFileSystemFactory fac) throws Exception {
+        ByteArrayOutputStream baos = new ByteArrayOutputStream();
+
+        ObjectOutput oo = new ObjectOutputStream(baos);
+
+        oo.writeObject(fac);
+
+        ObjectInput in = new ObjectInputStream(new ByteArrayInputStream(baos.toByteArray()));
+
+        KerberosHadoopFileSystemFactory fac2 = (KerberosHadoopFileSystemFactory)in.readObject();
+
+        assertEquals(fac.getUri(), fac2.getUri());
+        Assert.assertArrayEquals(fac.getConfigPaths(), fac2.getConfigPaths());
+        assertEquals(fac.getKeyTab(), fac2.getKeyTab());
+        assertEquals(fac.getKeyTabPrincipal(), fac2.getKeyTabPrincipal());
+        assertEquals(fac.getReloginInterval(), fac2.getReloginInterval());
+    }
+}
\ No newline at end of file
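
The parameter checks above assert that incomplete Kerberos settings fail fast on start(). For contrast, here is a sketch of a complete, valid configuration wired into a secondary file system; the URI, keytab path and principal are example values, not part of this commit.

    import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
    import org.apache.ignite.hadoop.fs.KerberosHadoopFileSystemFactory;

    public class KerberosFactorySketch {
        public static IgniteHadoopIgfsSecondaryFileSystem secondaryFileSystem() {
            KerberosHadoopFileSystemFactory fac = new KerberosHadoopFileSystemFactory();

            fac.setUri("hdfs://namenode.example.com:9000/");   // example endpoint
            fac.setConfigPaths("/etc/hadoop/core-site.xml");   // example config path
            fac.setKeyTab("/etc/krb5.keytab");                 // keytab and principal must
            fac.setKeyTabPrincipal("ignite@EXAMPLE.COM");      // be set together, along with
            fac.setReloginInterval(10 * 60 * 1000L);           // a positive relogin interval.

            IgniteHadoopIgfsSecondaryFileSystem secondary = new IgniteHadoopIgfsSecondaryFileSystem();

            secondary.setFileSystemFactory(fac);

            return secondary;
        }
    }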

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/hadoop/util/BasicUserNameMapperSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/hadoop/util/BasicUserNameMapperSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/hadoop/util/BasicUserNameMapperSelfTest.java
new file mode 100644
index 0000000..fd8fdef
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/hadoop/util/BasicUserNameMapperSelfTest.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.util;
+
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.jetbrains.annotations.Nullable;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Test for basic user name mapper.
+ */
+public class BasicUserNameMapperSelfTest extends GridCommonAbstractTest {
+    /**
+     * Test null mappings.
+     *
+     * @throws Exception If failed.
+     */
+    public void testNullMappings() throws Exception {
+        checkNullOrEmptyMappings(null);
+    }
+
+    /**
+     * Test empty mappings.
+     *
+     * @throws Exception If failed.
+     */
+    public void testEmptyMappings() throws Exception {
+        checkNullOrEmptyMappings(new HashMap<String, String>());
+    }
+
+    /**
+     * Check null or empty mappings.
+     *
+     * @param map Mappings.
+     * @throws Exception If failed.
+     */
+    private void checkNullOrEmptyMappings(@Nullable Map<String, String> map) throws Exception {
+        BasicUserNameMapper mapper = create(map, false, null);
+
+        assertNull(mapper.map(null));
+        assertEquals("1", mapper.map("1"));
+        assertEquals("2", mapper.map("2"));
+
+        mapper = create(map, true, null);
+
+        assertNull(mapper.map(null));
+        assertNull(mapper.map("1"));
+        assertNull(mapper.map("2"));
+
+        mapper = create(map, false, "A");
+
+        assertNull(mapper.map(null));
+        assertEquals("1", mapper.map("1"));
+        assertEquals("2", mapper.map("2"));
+
+        mapper = create(map, true, "A");
+
+        assertEquals("A", mapper.map(null));
+        assertEquals("A", mapper.map("1"));
+        assertEquals("A", mapper.map("2"));
+    }
+
+    /**
+     * Test regular mappings.
+     *
+     * @throws Exception If failed.
+     */
+    public void testMappings() throws Exception {
+        Map<String, String> map = new HashMap<>();
+
+        map.put("1", "101");
+
+        BasicUserNameMapper mapper = create(map, false, null);
+
+        assertNull(mapper.map(null));
+        assertEquals("101", mapper.map("1"));
+        assertEquals("2", mapper.map("2"));
+
+        mapper = create(map, true, null);
+
+        assertNull(mapper.map(null));
+        assertEquals("101", mapper.map("1"));
+        assertNull(mapper.map("2"));
+
+        mapper = create(map, false, "A");
+
+        assertNull(mapper.map(null));
+        assertEquals("101", mapper.map("1"));
+        assertEquals("2", mapper.map("2"));
+
+        mapper = create(map, true, "A");
+
+        assertEquals("A", mapper.map(null));
+        assertEquals("101", mapper.map("1"));
+        assertEquals("A", mapper.map("2"));
+    }
+
+    /**
+     * Create mapper.
+     *
+     * @param dictionary Dictionary.
+     * @param useDfltUsrName Whether to use default user name.
+     * @param dfltUsrName Default user name.
+     * @return Mapper.
+     */
+    private BasicUserNameMapper create(@Nullable Map<String, String> dictionary, boolean useDfltUsrName,
+        @Nullable String dfltUsrName) {
+        BasicUserNameMapper mapper = new BasicUserNameMapper();
+
+        mapper.setMappings(dictionary);
+        mapper.setUseDefaultUserName(useDfltUsrName);
+        mapper.setDefaultUserName(dfltUsrName);
+
+        return mapper;
+    }
+}
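
To make the matrix of assertions above concrete, here is a short usage sketch: with a dictionary entry and the default-name option enabled, mapped names are translated and everything else collapses to the default. Names and values are illustrative.

    import java.util.Collections;

    import org.apache.ignite.hadoop.util.BasicUserNameMapper;

    public class BasicMapperSketch {
        public static void main(String[] args) {
            BasicUserNameMapper mapper = new BasicUserNameMapper();

            mapper.setMappings(Collections.singletonMap("alice", "hdfs-alice"));
            mapper.setUseDefaultUserName(true);
            mapper.setDefaultUserName("anonymous");

            System.out.println(mapper.map("alice")); // "hdfs-alice" (explicit mapping)
            System.out.println(mapper.map("bob"));   // "anonymous" (no mapping, default used)
        }
    }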

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/hadoop/util/ChainedUserNameMapperSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/hadoop/util/ChainedUserNameMapperSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/hadoop/util/ChainedUserNameMapperSelfTest.java
new file mode 100644
index 0000000..bfac49c
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/hadoop/util/ChainedUserNameMapperSelfTest.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.util;
+
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.internal.processors.igfs.IgfsUtils;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import java.util.Collections;
+import java.util.concurrent.Callable;
+
+/**
+ * Tests for chained user name mapper.
+ */
+public class ChainedUserNameMapperSelfTest extends GridCommonAbstractTest {
+    /** Test instance. */
+    private static final String INSTANCE = "test_instance";
+
+    /** Test realm. */
+    private static final String REALM = "test_realm";
+
+    /**
+     * Test case when mappers are null.
+     *
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testNullMappers() throws Exception {
+        GridTestUtils.assertThrows(null, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                create((UserNameMapper[])null);
+
+                return null;
+            }
+        }, IgniteException.class, null);
+    }
+
+    /**
+     * Test case when one of mappers is null.
+     *
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testNullMapperElement() throws Exception {
+        GridTestUtils.assertThrows(null, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                create(new BasicUserNameMapper(), null);
+
+                return null;
+            }
+        }, IgniteException.class, null);
+    }
+
+    /**
+     * Test actual chaining logic.
+     *
+     * @throws Exception If failed.
+     */
+    public void testChaining() throws Exception {
+        BasicUserNameMapper mapper1 = new BasicUserNameMapper();
+
+        mapper1.setMappings(Collections.singletonMap("1", "101"));
+
+        KerberosUserNameMapper mapper2 = new KerberosUserNameMapper();
+
+        mapper2.setInstance(INSTANCE);
+        mapper2.setRealm(REALM);
+
+        ChainedUserNameMapper mapper = create(mapper1, mapper2);
+
+        assertEquals("101" + "/" + INSTANCE + "@" + REALM, mapper.map("1"));
+        assertEquals("2" + "/" + INSTANCE + "@" + REALM, mapper.map("2"));
+        assertEquals(IgfsUtils.fixUserName(null) + "/" + INSTANCE + "@" + REALM, mapper.map(null));
+    }
+
+    /**
+     * Create chained mapper.
+     *
+     * @param mappers Child mappers.
+     * @return Chained mapper.
+     */
+    private ChainedUserNameMapper create(UserNameMapper... mappers) {
+        ChainedUserNameMapper mapper = new ChainedUserNameMapper();
+
+        mapper.setMappers(mappers);
+
+        mapper.start();
+
+        return mapper;
+    }
+}
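
The chaining test above composes a dictionary pass with Kerberos principal formatting. A sketch of the same composition in user code follows; instance and realm values are examples.

    import java.util.Collections;

    import org.apache.ignite.hadoop.util.BasicUserNameMapper;
    import org.apache.ignite.hadoop.util.ChainedUserNameMapper;
    import org.apache.ignite.hadoop.util.KerberosUserNameMapper;

    public class ChainedMapperSketch {
        public static void main(String[] args) {
            BasicUserNameMapper basic = new BasicUserNameMapper();

            basic.setMappings(Collections.singletonMap("alice", "alice_prod"));

            KerberosUserNameMapper kerberos = new KerberosUserNameMapper();

            kerberos.setInstance("ignite");
            kerberos.setRealm("EXAMPLE.COM");

            ChainedUserNameMapper chained = new ChainedUserNameMapper();

            chained.setMappers(basic, kerberos);
            chained.start(); // the test calls start() explicitly; Ignite does this for LifecycleAware beans

            System.out.println(chained.map("alice")); // "alice_prod/ignite@EXAMPLE.COM"
        }
    }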

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/hadoop/util/KerberosUserNameMapperSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/hadoop/util/KerberosUserNameMapperSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/hadoop/util/KerberosUserNameMapperSelfTest.java
new file mode 100644
index 0000000..cc685bb
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/hadoop/util/KerberosUserNameMapperSelfTest.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.util;
+
+import org.apache.ignite.internal.processors.igfs.IgfsUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Tests for Kerberos user name mapper.
+ */
+public class KerberosUserNameMapperSelfTest extends GridCommonAbstractTest {
+    /** Test instance. */
+    private static final String INSTANCE = "test_instance";
+
+    /** Test realm. */
+    private static final String REALM = "test_realm";
+
+    /**
+     * Test mapper without instance and realm components.
+     *
+     * @throws Exception If failed.
+     */
+    public void testMapper() throws Exception {
+        KerberosUserNameMapper mapper = create(null, null);
+
+        assertEquals(IgfsUtils.fixUserName(null), mapper.map(null));
+        assertEquals("test", mapper.map("test"));
+    }
+
+    /**
+     * Test mapper with instance component.
+     *
+     * @throws Exception If failed.
+     */
+    public void testMapperInstance() throws Exception {
+        KerberosUserNameMapper mapper = create(INSTANCE, null);
+
+        assertEquals(IgfsUtils.fixUserName(null) + "/" + INSTANCE, mapper.map(null));
+        assertEquals("test" + "/" + INSTANCE, mapper.map("test"));
+    }
+
+    /**
+     * Test mapper with realm.
+     *
+     * @throws Exception If failed.
+     */
+    public void testMapperRealm() throws Exception {
+        KerberosUserNameMapper mapper = create(null, REALM);
+
+        assertEquals(IgfsUtils.fixUserName(null) + "@" + REALM, mapper.map(null));
+        assertEquals("test" + "@" + REALM, mapper.map("test"));
+    }
+
+    /**
+     * Test mapper with instance and realm components.
+     *
+     * @throws Exception If failed.
+     */
+    public void testMapperInstanceAndRealm() throws Exception {
+        KerberosUserNameMapper mapper = create(INSTANCE, REALM);
+
+        assertEquals(IgfsUtils.fixUserName(null) + "/" + INSTANCE + "@" + REALM, mapper.map(null));
+        assertEquals("test" + "/" + INSTANCE + "@" + REALM, mapper.map("test"));
+    }
+
+    /**
+     * Create mapper.
+     *
+     * @param instance Instance.
+     * @param realm Realm.
+     * @return Mapper.
+     */
+    private KerberosUserNameMapper create(@Nullable String instance, @Nullable String realm) {
+        KerberosUserNameMapper mapper = new KerberosUserNameMapper();
+
+        mapper.setInstance(instance);
+        mapper.setRealm(realm);
+
+        mapper.start();
+
+        return mapper;
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/Hadoop1DualAbstractTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/Hadoop1DualAbstractTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/Hadoop1DualAbstractTest.java
new file mode 100644
index 0000000..2c25a06
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/Hadoop1DualAbstractTest.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.hadoop.fs.CachingHadoopFileSystemFactory;
+import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
+import org.apache.ignite.hadoop.util.ChainedUserNameMapper;
+import org.apache.ignite.hadoop.util.KerberosUserNameMapper;
+import org.apache.ignite.hadoop.util.UserNameMapper;
+import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystem;
+import org.apache.ignite.internal.processors.igfs.IgfsDualAbstractSelfTest;
+import org.apache.ignite.lifecycle.LifecycleAware;
+import org.jetbrains.annotations.Nullable;
+
+import static org.apache.ignite.IgniteFileSystem.IGFS_SCHEME;
+import static org.apache.ignite.igfs.HadoopSecondaryFileSystemConfigurationTest.SECONDARY_CFG_PATH;
+import static org.apache.ignite.igfs.HadoopSecondaryFileSystemConfigurationTest.configuration;
+import static org.apache.ignite.igfs.HadoopSecondaryFileSystemConfigurationTest.mkUri;
+import static org.apache.ignite.igfs.HadoopSecondaryFileSystemConfigurationTest.writeConfiguration;
+import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
+
+/**
+ * Abstract test for Hadoop 1.0 file system stack.
+ */
+public abstract class Hadoop1DualAbstractTest extends IgfsDualAbstractSelfTest {
+    /** Secondary grid name. */
+    private static final String GRID_NAME = "grid_secondary";
+
+    /** Secondary file system name. */
+    private static final String IGFS_NAME = "igfs_secondary";
+
+    /** Secondary file system REST endpoint port. */
+    private static final int PORT = 11500;
+
+    /** Secondary file system REST endpoint configuration map. */
+    private static final IgfsIpcEndpointConfiguration SECONDARY_REST_CFG = new IgfsIpcEndpointConfiguration() {{
+        setType(IgfsIpcEndpointType.TCP);
+        setPort(PORT);
+    }};
+
+    /** Secondary file system authority. */
+    private static final String SECONDARY_AUTHORITY = IGFS_NAME + ":" + GRID_NAME + "@127.0.0.1:" + PORT;
+
+    /** Secondary Fs configuration full path. */
+    protected String secondaryConfFullPath;
+
+    /** Secondary Fs URI. */
+    protected String secondaryUri;
+
+    /**
+     * Constructor.
+     *
+     * @param mode IGFS mode.
+     */
+    public Hadoop1DualAbstractTest(IgfsMode mode) {
+        super(mode);
+    }
+
+    /**
+     * Creates the secondary file system stack.
+     *
+     * @return Secondary file system.
+     * @throws Exception On failure.
+     */
+    @Override protected IgfsSecondaryFileSystem createSecondaryFileSystemStack() throws Exception {
+        startUnderlying();
+
+        prepareConfiguration();
+
+        KerberosUserNameMapper mapper1 = new KerberosUserNameMapper();
+
+        mapper1.setRealm("TEST.COM");
+
+        TestUserNameMapper mapper2 = new TestUserNameMapper();
+
+        ChainedUserNameMapper mapper = new ChainedUserNameMapper();
+
+        mapper.setMappers(mapper1, mapper2);
+
+        CachingHadoopFileSystemFactory factory = new CachingHadoopFileSystemFactory();
+
+        factory.setUri(secondaryUri);
+        factory.setConfigPaths(secondaryConfFullPath);
+        factory.setUserNameMapper(mapper);
+
+        IgniteHadoopIgfsSecondaryFileSystem second = new IgniteHadoopIgfsSecondaryFileSystem();
+
+        second.setFileSystemFactory(factory);
+
+        igfsSecondary = new HadoopIgfsSecondaryFileSystemTestAdapter(factory);
+
+        return second;
+    }
+
+    /**
+     * Starts the underlying Ignite node hosting the secondary file system.
+     *
+     * @throws Exception On failure.
+     */
+    protected void startUnderlying() throws Exception {
+        startGridWithIgfs(GRID_NAME, IGFS_NAME, PRIMARY, null, SECONDARY_REST_CFG, secondaryIpFinder);
+    }
+
+    /**
+     * Prepares the secondary file system configuration.
+     *
+     * @throws IOException On failure.
+     */
+    protected void prepareConfiguration() throws IOException {
+        Configuration secondaryConf = configuration(IGFS_SCHEME, SECONDARY_AUTHORITY, true, true);
+
+        secondaryConf.setInt("fs.igfs.block.size", 1024);
+
+        secondaryConfFullPath = writeConfiguration(secondaryConf, SECONDARY_CFG_PATH);
+
+        secondaryUri = mkUri(IGFS_SCHEME, SECONDARY_AUTHORITY);
+    }
+
+    /**
+     * Test user name mapper.
+     */
+    private static class TestUserNameMapper implements UserNameMapper, LifecycleAware {
+        /** */
+        private static final long serialVersionUID = 0L;
+
+        /** Started flag. */
+        private boolean started;
+
+        /** {@inheritDoc} */
+        @Nullable @Override public String map(String name) {
+            assert started;
+            assert name != null && name.contains("@");
+
+            return name.substring(0, name.indexOf("@"));
+        }
+
+        /** {@inheritDoc} */
+        @Override public void start() throws IgniteException {
+            started = true;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void stop() throws IgniteException {
+            // No-op.
+        }
+    }
+}
\ No newline at end of file
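
Outside the test harness, the same secondary-file-system stack would be declared through the public configuration API. A sketch follows, with names, URI and mode as example values; cache wiring and the rest of the IgniteConfiguration are elided.

    import org.apache.ignite.configuration.FileSystemConfiguration;
    import org.apache.ignite.hadoop.fs.CachingHadoopFileSystemFactory;
    import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
    import org.apache.ignite.igfs.IgfsMode;

    public class DualModeConfigSketch {
        public static FileSystemConfiguration igfsConfiguration() {
            CachingHadoopFileSystemFactory factory = new CachingHadoopFileSystemFactory();

            factory.setUri("igfs://igfs_secondary:grid_secondary@127.0.0.1:11500/");

            IgniteHadoopIgfsSecondaryFileSystem secondary = new IgniteHadoopIgfsSecondaryFileSystem();

            secondary.setFileSystemFactory(factory);

            FileSystemConfiguration igfsCfg = new FileSystemConfiguration();

            igfsCfg.setName("igfs");
            igfsCfg.setDefaultMode(IgfsMode.DUAL_ASYNC);
            igfsCfg.setSecondaryFileSystem(secondary);

            return igfsCfg;
        }
    }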

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/Hadoop1OverIgfsDualAsyncTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/Hadoop1OverIgfsDualAsyncTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/Hadoop1OverIgfsDualAsyncTest.java
new file mode 100644
index 0000000..bbf1223
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/Hadoop1OverIgfsDualAsyncTest.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+/**
+ * DUAL_ASYNC mode test.
+ */
+public class Hadoop1OverIgfsDualAsyncTest extends Hadoop1DualAbstractTest {
+    /**
+     * Constructor.
+     */
+    public Hadoop1OverIgfsDualAsyncTest() {
+        super(IgfsMode.DUAL_ASYNC);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/Hadoop1OverIgfsDualSyncTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/Hadoop1OverIgfsDualSyncTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/Hadoop1OverIgfsDualSyncTest.java
new file mode 100644
index 0000000..c57415c
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/Hadoop1OverIgfsDualSyncTest.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+/**
+ * DUAL_SYNC mode test.
+ */
+public class Hadoop1OverIgfsDualSyncTest extends Hadoop1DualAbstractTest {
+    /**
+     * Constructor.
+     */
+    public Hadoop1OverIgfsDualSyncTest() {
+        super(IgfsMode.DUAL_SYNC);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopFIleSystemFactorySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopFIleSystemFactorySelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopFIleSystemFactorySelfTest.java
new file mode 100644
index 0000000..5be3a64
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/HadoopFIleSystemFactorySelfTest.java
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.cache.CacheWriteSynchronizationMode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.FileSystemConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.hadoop.fs.CachingHadoopFileSystemFactory;
+import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
+import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
+import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystem;
+import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
+import org.apache.ignite.internal.processors.igfs.IgfsEx;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.jetbrains.annotations.Nullable;
+
+import java.io.Externalizable;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.net.URI;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.cache.CacheMode.REPLICATED;
+
+/**
+ * Tests for Hadoop file system factory.
+ */
+public class HadoopFIleSystemFactorySelfTest extends IgfsCommonAbstractTest {
+    /** Amount of "start" invocations */
+    private static final AtomicInteger START_CNT = new AtomicInteger();
+
+    /** Amount of "stop" invocations */
+    private static final AtomicInteger STOP_CNT = new AtomicInteger();
+
+    /** Path to secondary file system configuration. */
+    private static final String SECONDARY_CFG_PATH = "/work/core-site-HadoopFIleSystemFactorySelfTest.xml";
+
+    /** IGFS path for DUAL mode. */
+    private static final Path PATH_DUAL = new Path("/ignite/sync/test_dir");
+
+    /** IGFS path for PROXY mode. */
+    private static final Path PATH_PROXY = new Path("/ignite/proxy/test_dir");
+
+    /** IGFS path for DUAL mode. */
+    private static final IgfsPath IGFS_PATH_DUAL = new IgfsPath("/ignite/sync/test_dir");
+
+    /** IGFS path for PROXY mode. */
+    private static final IgfsPath IGFS_PATH_PROXY = new IgfsPath("/ignite/proxy/test_dir");
+
+    /** Secondary IGFS. */
+    private IgfsEx secondary;
+
+    /** Primary IGFS. */
+    private IgfsEx primary;
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        START_CNT.set(0);
+        STOP_CNT.set(0);
+
+        secondary = startSecondary();
+        primary = startPrimary();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        super.afterTest();
+
+        secondary = null;
+        primary = null;
+
+        stopAllGrids();
+    }
+
+    /**
+     * Test custom factory.
+     *
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testCustomFactory() throws Exception {
+        assert START_CNT.get() == 1;
+        assert STOP_CNT.get() == 0;
+
+        // Use IGFS directly.
+        primary.mkdirs(IGFS_PATH_DUAL);
+
+        assert primary.exists(IGFS_PATH_DUAL);
+        assert secondary.exists(IGFS_PATH_DUAL);
+
+        // Create remote instance.
+        FileSystem fs = FileSystem.get(URI.create("igfs://primary:primary@127.0.0.1:10500/"), baseConfiguration());
+
+        // Ensure lifecycle callback was invoked.
+        assert START_CNT.get() == 2;
+        assert STOP_CNT.get() == 0;
+
+        // Check file system operations.
+        assert fs.exists(PATH_DUAL);
+
+        assert fs.delete(PATH_DUAL, true);
+        assert !primary.exists(IGFS_PATH_DUAL);
+        assert !secondary.exists(IGFS_PATH_DUAL);
+        assert !fs.exists(PATH_DUAL);
+
+        assert fs.mkdirs(PATH_DUAL);
+        assert primary.exists(IGFS_PATH_DUAL);
+        assert secondary.exists(IGFS_PATH_DUAL);
+        assert fs.exists(PATH_DUAL);
+
+        assert fs.mkdirs(PATH_PROXY);
+        assert secondary.exists(IGFS_PATH_PROXY);
+        assert fs.exists(PATH_PROXY);
+
+        // Close file system and ensure that associated factory was notified.
+        fs.close();
+
+        assert START_CNT.get() == 2;
+        assert STOP_CNT.get() == 1;
+
+        // Stop primary node and ensure that base factory was notified.
+        G.stop(primary.context().kernalContext().grid().name(), true);
+
+        assert START_CNT.get() == 2;
+        assert STOP_CNT.get() == 2;
+    }
+
+    /**
+     * Start secondary IGFS.
+     *
+     * @return IGFS.
+     * @throws Exception If failed.
+     */
+    private static IgfsEx startSecondary() throws Exception {
+        return start("secondary", 11500, IgfsMode.PRIMARY, null);
+    }
+
+    /**
+     * Start primary IGFS.
+     *
+     * @return IGFS.
+     * @throws Exception If failed.
+     */
+    private static IgfsEx startPrimary() throws Exception {
+        // Prepare configuration.
+        Configuration conf = baseConfiguration();
+
+        conf.set("fs.defaultFS", "igfs://secondary:secondary@127.0.0.1:11500/");
+
+        writeConfigurationToFile(conf);
+
+        // Configure factory.
+        TestFactory factory = new TestFactory();
+
+        factory.setUri("igfs://secondary:secondary@127.0.0.1:11500/");
+        factory.setConfigPaths(SECONDARY_CFG_PATH);
+
+        // Configure file system.
+        IgniteHadoopIgfsSecondaryFileSystem fs = new IgniteHadoopIgfsSecondaryFileSystem();
+
+        fs.setFileSystemFactory(factory);
+
+        // Start.
+        return start("primary", 10500, IgfsMode.DUAL_ASYNC, fs);
+    }
+
+    /**
+     * Start Ignite node with IGFS instance.
+     *
+     * @param name Node and IGFS name.
+     * @param endpointPort Endpoint port.
+     * @param dfltMode Default path mode.
+     * @param secondaryFs Secondary file system.
+     * @return Igfs instance.
+     */
+    private static IgfsEx start(String name, int endpointPort, IgfsMode dfltMode,
+        @Nullable IgfsSecondaryFileSystem secondaryFs) {
+        IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
+
+        endpointCfg.setType(IgfsIpcEndpointType.TCP);
+        endpointCfg.setHost("127.0.0.1");
+        endpointCfg.setPort(endpointPort);
+
+        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
+
+        igfsCfg.setDataCacheName("dataCache");
+        igfsCfg.setMetaCacheName("metaCache");
+        igfsCfg.setName(name);
+        igfsCfg.setDefaultMode(dfltMode);
+        igfsCfg.setIpcEndpointConfiguration(endpointCfg);
+        igfsCfg.setSecondaryFileSystem(secondaryFs);
+        igfsCfg.setInitializeDefaultPathModes(true);
+
+        CacheConfiguration dataCacheCfg = defaultCacheConfiguration();
+
+        dataCacheCfg.setName("dataCache");
+        dataCacheCfg.setCacheMode(PARTITIONED);
+        dataCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        dataCacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(2));
+        dataCacheCfg.setBackups(0);
+        dataCacheCfg.setAtomicityMode(TRANSACTIONAL);
+        dataCacheCfg.setOffHeapMaxMemory(0);
+
+        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
+
+        metaCacheCfg.setName("metaCache");
+        metaCacheCfg.setCacheMode(REPLICATED);
+        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        IgniteConfiguration cfg = new IgniteConfiguration();
+
+        cfg.setGridName(name);
+
+        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+
+        discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
+
+        cfg.setDiscoverySpi(discoSpi);
+        cfg.setCacheConfiguration(dataCacheCfg, metaCacheCfg);
+        cfg.setFileSystemConfiguration(igfsCfg);
+
+        cfg.setLocalHost("127.0.0.1");
+        cfg.setConnectorConfiguration(null);
+
+        return (IgfsEx)G.start(cfg).fileSystem(name);
+    }
+
+    /**
+     * Create base FileSystem configuration.
+     *
+     * @return Configuration.
+     */
+    private static Configuration baseConfiguration() {
+        Configuration conf = new Configuration();
+
+        conf.set("fs.igfs.impl", IgniteHadoopFileSystem.class.getName());
+
+        return conf;
+    }
+
+    /**
+     * Write configuration to file.
+     *
+     * @param conf Configuration.
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("ResultOfMethodCallIgnored")
+    private static void writeConfigurationToFile(Configuration conf) throws Exception {
+        final String path = U.getIgniteHome() + SECONDARY_CFG_PATH;
+
+        File file = new File(path);
+
+        file.delete();
+
+        assertFalse(file.exists());
+
+        try (FileOutputStream fos = new FileOutputStream(file)) {
+            conf.writeXml(fos);
+        }
+
+        assertTrue(file.exists());
+    }
+
+    /**
+     * Test factory.
+     */
+    private static class TestFactory extends CachingHadoopFileSystemFactory {
+        /**
+         * {@link Externalizable} support.
+         */
+        public TestFactory() {
+            // No-op.
+        }
+
+        /** {@inheritDoc} */
+        @Override public void start() throws IgniteException {
+            START_CNT.incrementAndGet();
+
+            super.start();
+        }
+
+        /** {@inheritDoc} */
+        @Override public void stop() throws IgniteException {
+            STOP_CNT.incrementAndGet();
+
+            super.stop();
+        }
+    }
+}
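
The factory lifecycle counted above is driven entirely by ordinary client usage: opening an IgniteHadoopFileSystem starts the factory, closing it stops it. A minimal client-side sketch follows; host, port and path are example values.

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class IgfsClientSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();

            conf.set("fs.igfs.impl", "org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem");

            // close() is what triggers the factory "stop" callback counted by the test.
            try (FileSystem fs = FileSystem.get(URI.create("igfs://primary:primary@127.0.0.1:10500/"), conf)) {
                fs.mkdirs(new Path("/ignite/sync/example"));
            }
        }
    }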


http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/CircularWIthHadoop.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/CircularWIthHadoop.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/CircularWIthHadoop.java
new file mode 100644
index 0000000..c3aa7d9
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/CircularWIthHadoop.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.mapreduce.Job;
+
+/**
+ * Class has a direct Hadoop dependency and a circular dependency on another class.
+ */
+@SuppressWarnings("unused")
+public class CircularWIthHadoop {
+    /** */
+    private Job[][] jobs = new Job[4][4];
+
+    /** */
+    private CircularWithoutHadoop y;
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/CircularWithoutHadoop.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/CircularWithoutHadoop.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/CircularWithoutHadoop.java
new file mode 100644
index 0000000..93d659c
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/CircularWithoutHadoop.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+/**
+ * Does not have a direct Hadoop dependency, but has a circular dependency on a class that does.
+ */
+@SuppressWarnings("unused")
+public class CircularWithoutHadoop {
+    /** */
+    private CircularWIthHadoop x;
+}
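
These two tiny fixtures encode the interesting corner case: CircularWIthHadoop references Hadoop directly, CircularWithoutHadoop does not, yet both must be classified as Hadoop-dependent because they reference each other. A rough sketch of the kind of check that consumes them follows; the HadoopClassLoader constructor and the hasExternalDependencies signature are assumptions inferred from this module's class loader self-test, not guaranteed API.

    import org.apache.ignite.internal.processors.hadoop.HadoopClassLoader;

    public class DepsCheckSketch {
        public static void main(String[] args) throws Exception {
            // Assumed constructor: null URLs fall back to the parent class path.
            HadoopClassLoader ldr = new HadoopClassLoader(null, "test");

            String pkg = "org.apache.ignite.internal.processors.hadoop.deps.";

            // Expected true: direct Hadoop field (Job[][]).
            System.out.println(ldr.hasExternalDependencies(pkg + "CircularWIthHadoop"));

            // Expected true as well: reaches Hadoop only through the cycle.
            System.out.println(ldr.hasExternalDependencies(pkg + "CircularWithoutHadoop"));
        }
    }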

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithCast.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithCast.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithCast.java
new file mode 100644
index 0000000..5b1e8e0
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithCast.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.fs.FileSystem;
+
+/**
+ * Class contains casting to a Hadoop type.
+ */
+@SuppressWarnings("unused")
+public abstract class WithCast<T> {
+    /** */
+    public abstract T create();
+
+    /** */
+    public void consume(T t) {
+        // No-op.
+    }
+
+    /** */
+    void test(WithCast<FileSystem> c) {
+        FileSystem fs = c.create();
+
+        c.consume(fs);
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithClassAnnotation.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithClassAnnotation.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithClassAnnotation.java
new file mode 100644
index 0000000..a9ecae0
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithClassAnnotation.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Class has Hadoop annotation.
+ */
+@SuppressWarnings("unused")
+@InterfaceAudience.Public
+public class WithClassAnnotation {
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithConstructorInvocation.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithConstructorInvocation.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithConstructorInvocation.java
new file mode 100644
index 0000000..98c8991
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithConstructorInvocation.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Invokes a Hadoop type constructor.
+ */
+@SuppressWarnings("unused")
+public class WithConstructorInvocation {
+    /** */
+    private void foo() {
+        Object x = new Configuration();
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithExtends.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithExtends.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithExtends.java
new file mode 100644
index 0000000..80c99e1
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithExtends.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.fs.LocalFileSystem;
+
+/**
+ * Class extends a Hadoop class.
+ */
+public class WithExtends extends LocalFileSystem {
+    // noop
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithField.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithField.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithField.java
new file mode 100644
index 0000000..dd979db
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithField.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Has a Hadoop field.
+ */
+@SuppressWarnings("unused")
+public class WithField {
+    /** */
+    private Configuration conf;
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithImplements.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithImplements.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithImplements.java
new file mode 100644
index 0000000..c2d8e5b
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithImplements.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Implements a Hadoop interface.
+ */
+public class WithImplements implements Configurable {
+    /** {@inheritDoc} */
+    @Override public void setConf(Configuration conf) {
+        // noop
+    }
+
+    /** {@inheritDoc} */
+    @Override public Configuration getConf() {
+        return null;
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithIndirectField.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithIndirectField.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithIndirectField.java
new file mode 100644
index 0000000..ce078f1
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithIndirectField.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+/**
+ * Has a unidirectional dependency on a Hadoop-dependent class.
+ */
+@SuppressWarnings("unused")
+public class WithIndirectField {
+    /** */
+    WithField x;
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithInitializer.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithInitializer.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithInitializer.java
new file mode 100644
index 0000000..360986c
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithInitializer.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+/**
+ * Has a field initialized with an expression invoking a Hadoop method.
+ */
+
+@SuppressWarnings({"ConstantConditions", "unused"})
+public class WithInitializer {
+    /** */
+    private final Object x = org.apache.hadoop.fs.FileSystem.getDefaultUri(null);
+
+    /** */
+    WithInitializer() throws Exception {
+        // noop
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithInnerClass.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithInnerClass.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithInnerClass.java
new file mode 100644
index 0000000..4a5a49c
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithInnerClass.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.conf.Configurable;
+
+/**
+ * Has a *static* inner class depending on Hadoop.
+ */
+@SuppressWarnings("unused")
+public class WithInnerClass {
+    /** */
+    private static abstract class Foo implements Configurable {
+        // No-op.
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithLocalVariable.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithLocalVariable.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithLocalVariable.java
new file mode 100644
index 0000000..ea4a5de
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithLocalVariable.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Has a local variable of Hadoop type.
+ */
+@SuppressWarnings({"unused", "ConstantConditions"})
+public class WithLocalVariable {
+    /** */
+    void foo() {
+        Configuration c = null;
+
+        moo(c);
+    }
+
+    /** */
+    void moo(Object x) {
+        // No-op.
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodAnnotation.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodAnnotation.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodAnnotation.java
new file mode 100644
index 0000000..ff9fbe0
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodAnnotation.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Method has a Hadoop annotation.
+ */
+@SuppressWarnings("unused")
+public class WithMethodAnnotation {
+    /** */
+    @InterfaceStability.Unstable
+    void foo() {
+        // No-op.
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodArgument.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodArgument.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodArgument.java
new file mode 100644
index 0000000..7f639e4
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodArgument.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Contains a formal parameter of Hadoop type.
+ */
+@SuppressWarnings("unused")
+public class WithMethodArgument {
+    /** */
+    protected void parameterMethod(Configuration c) {
+        // No-op.
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodCheckedException.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodCheckedException.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodCheckedException.java
new file mode 100644
index 0000000..8fd12ae
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodCheckedException.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.fs.ChecksumException;
+
+/**
+ * Method declares a checked Hadoop exception.
+ */
+@SuppressWarnings("unused")
+public class WithMethodCheckedException {
+    /** */
+    void foo() throws ChecksumException {
+        // No-op.
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodInvocation.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodInvocation.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodInvocation.java
new file mode 100644
index 0000000..de8b306
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodInvocation.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.fs.FileSystem;
+
+/**
+ * Method contains a Hadoop type method invocation.
+ */
+@SuppressWarnings("unused")
+public class WithMethodInvocation {
+    /** */
+    void foo(FileSystem fs) {
+        fs.getChildFileSystems();
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodReturnType.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodReturnType.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodReturnType.java
new file mode 100644
index 0000000..0e0ea72
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodReturnType.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.fs.FileSystem;
+
+/**
+ * Contains a method return value of Hadoop type.
+ */
+@SuppressWarnings("unused")
+public class WithMethodReturnType {
+    /** */
+    FileSystem fsMethod() {
+        return null;
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodRuntimeException.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodRuntimeException.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodRuntimeException.java
new file mode 100644
index 0000000..dcd471c
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithMethodRuntimeException.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+
+/**
+ * Method declares a runtime Hadoop exception.
+ */
+@SuppressWarnings("unused")
+public class WithMethodRuntimeException {
+    /** */
+    void foo() throws HadoopIllegalArgumentException {
+        // No-op.
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithOuterClass.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithOuterClass.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithOuterClass.java
new file mode 100644
index 0000000..cae1da7
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithOuterClass.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Outer class depends on Hadoop, but the inner *static* one does not.
+ */
+@SuppressWarnings("unused")
+public class WithOuterClass {
+    /** */
+    Configuration c;
+
+    /** */
+    public static class InnerNoHadoop {
+        /** */
+        int x;
+
+        /** */
+        void foo() {}
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithParameterAnnotation.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithParameterAnnotation.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithParameterAnnotation.java
new file mode 100644
index 0000000..9d3414e
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithParameterAnnotation.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Has a parameter annotated with a Hadoop annotation.
+ */
+@SuppressWarnings("unused")
+public class WithParameterAnnotation {
+    /** */
+    void foo(@InterfaceStability.Stable Object annotatedParam) {
+        // No-op.
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithStaticField.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithStaticField.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithStaticField.java
new file mode 100644
index 0000000..301b912
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithStaticField.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import org.apache.hadoop.fs.FileSystem;
+
+/**
+ * Has a static field of Hadoop type.
+ */
+@SuppressWarnings("unused")
+public class WithStaticField {
+    /** */
+    static FileSystem fs;
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithStaticInitializer.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithStaticInitializer.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithStaticInitializer.java
new file mode 100644
index 0000000..e0fc2f3
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/WithStaticInitializer.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+import java.util.List;
+import org.apache.hadoop.fs.FileSystem;
+
+/**
+ * Uses a Hadoop type in a static initializer.
+ */
+@SuppressWarnings("unused")
+public class WithStaticInitializer {
+    /** */
+    static final List x;
+
+    static {
+        x = FileSystem.getAllStatistics();
+    }
+}

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/Without.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/Without.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/Without.java
new file mode 100644
index 0000000..ab84740
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/deps/Without.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.deps;
+
+/**
+ * Class that does not depend on Hadoop in any way.
+ */
+public class Without {
+    // No-op.
+}
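
Taken together, the classes in the deps package enumerate the ways a compiled class can pick up a Hadoop dependency: superclass, interface, class/method/parameter annotations, instance and static fields, constructor and method invocations, method arguments and return types, checked and runtime exceptions, casts, local variables, field and static initializers, inner and outer classes, and indirect or circular references. They are presumably fixtures for a bytecode-level dependency check. A crude, self-contained sketch of such a check, assuming nothing beyond the JDK (the name referencesHadoop is illustrative, not Ignite's API):

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.InputStream;

    /** Illustrative sketch: detects direct Hadoop references in a class file. */
    final class HadoopDepCheck {
        /** @return {@code true} if the compiled class directly mentions an org.apache.hadoop type. */
        static boolean referencesHadoop(Class<?> cls) throws IOException {
            String res = cls.getName().replace('.', '/') + ".class";

            try (InputStream in = cls.getClassLoader().getResourceAsStream(res)) {
                ByteArrayOutputStream bos = new ByteArrayOutputStream();

                byte[] buf = new byte[4096];

                for (int n; (n = in.read(buf)) > 0; )
                    bos.write(buf, 0, n);

                // Every directly referenced type appears as a constant-pool UTF-8
                // entry in internal form, e.g. "org/apache/hadoop/conf/Configuration".
                return new String(bos.toByteArray(), "ISO-8859-1").contains("org/apache/hadoop");
            }
        }
    }

Such a scan flags every fixture above except Without, WithIndirectField and CircularWithoutHadoop: the latter two mention only another class from this package, so a real detector must follow references transitively, and the circular pair exists precisely to verify that such recursion terminates.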

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1.java
new file mode 100644
index 0000000..a2faf95
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.examples;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.FileInputFormat;
+import org.apache.hadoop.mapred.FileOutputFormat;
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.TextInputFormat;
+import org.apache.hadoop.mapred.TextOutputFormat;
+
+/**
+ * Example job for testing Hadoop task execution.
+ */
+public class HadoopWordCount1 {
+    /**
+     * Entry point to start job.
+     * @param args Command line parameters.
+     * @throws Exception If fails.
+     */
+    public static void main(String[] args) throws Exception {
+        if (args.length != 2) {
+            System.out.println("usage: [input] [output]");
+            System.exit(-1);
+        }
+
+        JobConf job = getJob(args[0], args[1]);
+
+        JobClient.runJob(job);
+    }
+
+    /**
+     * Gets fully configured JobConf instance.
+     *
+     * @param input Input file name.
+     * @param output Output directory name.
+     * @return Job configuration.
+     */
+    public static JobConf getJob(String input, String output) {
+        JobConf conf = new JobConf(HadoopWordCount1.class);
+        conf.setJobName("wordcount");
+
+        conf.setOutputKeyClass(Text.class);
+        conf.setOutputValueClass(IntWritable.class);
+
+        setTasksClasses(conf, true, true, true);
+
+        FileInputFormat.setInputPaths(conf, new Path(input));
+        FileOutputFormat.setOutputPath(conf, new Path(output));
+
+        return conf;
+    }
+
+    /**
+     * Sets task classes, with related info if needed, into the configuration object.
+     *
+     * @param jobConf Configuration to change.
+     * @param setMapper Option to set mapper and input format classes.
+     * @param setCombiner Option to set combiner class.
+     * @param setReducer Option to set reducer and output format classes.
+     */
+    public static void setTasksClasses(JobConf jobConf, boolean setMapper, boolean setCombiner, boolean setReducer) {
+        if (setMapper) {
+            jobConf.setMapperClass(HadoopWordCount1Map.class);
+            jobConf.setInputFormat(TextInputFormat.class);
+        }
+
+        if (setCombiner)
+            jobConf.setCombinerClass(HadoopWordCount1Reduce.class);
+
+        if (setReducer) {
+            jobConf.setReducerClass(HadoopWordCount1Reduce.class);
+            jobConf.setOutputFormat(TextOutputFormat.class);
+        }
+    }
+}
\ No newline at end of file
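
The three flags of setTasksClasses() make it easy to assemble partial pipelines in tests. As one plausible use (a sketch, not code from this commit; the paths are illustrative, and the fragment belongs in a method declaring throws Exception), a map-only variant of the job:

    JobConf conf = new JobConf(HadoopWordCount1.class);

    conf.setJobName("wordcount-map-only");

    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(IntWritable.class);

    // Mapper and input format only: no combiner, no reducer.
    HadoopWordCount1.setTasksClasses(conf, true, false, false);

    // With zero reduce tasks, map output goes straight to the output format.
    conf.setNumReduceTasks(0);

    FileInputFormat.setInputPaths(conf, new Path("/input"));
    FileOutputFormat.setOutputPath(conf, new Path("/out-map-only"));

    JobClient.runJob(conf);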

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1Map.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1Map.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1Map.java
new file mode 100644
index 0000000..d4cd190
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1Map.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.examples;
+
+import java.io.IOException;
+import java.util.StringTokenizer;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.MapReduceBase;
+import org.apache.hadoop.mapred.Mapper;
+import org.apache.hadoop.mapred.OutputCollector;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.ignite.internal.processors.hadoop.HadoopErrorSimulator;
+
+/**
+ * Mapper phase of WordCount job.
+ */
+public class HadoopWordCount1Map extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
+    /** Writable integer constant of '1', written as the count for each found word. */
+    private static final IntWritable one = new IntWritable(1);
+
+    /** Writable container for writing the current word. */
+    private Text word = new Text();
+
+    /** Flag to check that the mapper was configured before running. */
+    private boolean wasConfigured;
+
+    /** {@inheritDoc} */
+    @Override public void map(LongWritable key, Text val, OutputCollector<Text, IntWritable> output, Reporter reporter)
+            throws IOException {
+
+        assert wasConfigured : "Mapper should be configured";
+
+        String line = val.toString();
+
+        StringTokenizer tokenizer = new StringTokenizer(line);
+
+        while (tokenizer.hasMoreTokens()) {
+            word.set(tokenizer.nextToken());
+
+            output.collect(word, one);
+        }
+
+        HadoopErrorSimulator.instance().onMap();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void configure(JobConf job) {
+        super.configure(job);
+
+        wasConfigured = true;
+
+        HadoopErrorSimulator.instance().onMapConfigure();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void close() throws IOException {
+        super.close();
+
+        HadoopErrorSimulator.instance().onMapClose();
+    }
+}
\ No newline at end of file
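
Because the old-API mapper is a plain class, it can be exercised without a cluster. A minimal harness, assuming the default HadoopErrorSimulator instance is a no-op (the harness class and input string are illustrative):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.Reporter;

    public class WordCount1MapHarness {
        public static void main(String[] args) throws IOException {
            HadoopWordCount1Map mapper = new HadoopWordCount1Map();

            mapper.configure(new JobConf()); // Satisfies the wasConfigured assertion.

            final List<String> out = new ArrayList<>();

            mapper.map(new LongWritable(0), new Text("to be or not to be"),
                new OutputCollector<Text, IntWritable>() {
                    @Override public void collect(Text key, IntWritable val) {
                        out.add(key + "=" + val);
                    }
                }, Reporter.NULL);

            System.out.println(out); // [to=1, be=1, or=1, not=1, to=1, be=1]

            mapper.close();
        }
    }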

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1Reduce.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1Reduce.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1Reduce.java
new file mode 100644
index 0000000..b400d9b
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1Reduce.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.examples;
+
+import java.io.IOException;
+import java.util.Iterator;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.MapReduceBase;
+import org.apache.hadoop.mapred.OutputCollector;
+import org.apache.hadoop.mapred.Reducer;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.ignite.internal.processors.hadoop.HadoopErrorSimulator;
+
+/**
+ * Combiner and Reducer phase of WordCount job.
+ */
+public class HadoopWordCount1Reduce extends MapReduceBase implements Reducer<Text, IntWritable, Text, IntWritable> {
+    /** Flag to check that the reducer was configured before running. */
+    private boolean wasConfigured;
+
+    /** {@inheritDoc} */
+    @Override public void reduce(Text key, Iterator<IntWritable> values, OutputCollector<Text, IntWritable> output, Reporter reporter)
+            throws IOException {
+        assert wasConfigured : "Reducer should be configured";
+
+        int sum = 0;
+
+        while (values.hasNext())
+            sum += values.next().get();
+
+        output.collect(key, new IntWritable(sum));
+
+        HadoopErrorSimulator.instance().onReduce();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void configure(JobConf job) {
+        super.configure(job);
+
+        wasConfigured = true;
+
+        HadoopErrorSimulator.instance().onReduceConfigure();
+    }
+}
\ No newline at end of file
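
Note that HadoopWordCount1.setTasksClasses() installs this same class as both combiner and reducer. That is only safe because per-word summation is associative and commutative and the input and output types coincide (Text/IntWritable on both sides); a job without those properties would need a dedicated combiner class.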

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2.java
new file mode 100644
index 0000000..b2cfee3
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.examples;
+
+import java.io.IOException;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.compress.SnappyCodec;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
+import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
+
+/**
+ * Example job for testing Hadoop task execution.
+ */
+public class HadoopWordCount2 {
+    /**
+     * Entry point to start job.
+     *
+     * @param args Command line parameters.
+     * @throws Exception If fails.
+     */
+    public static void main(String[] args) throws Exception {
+        if (args.length != 2) {
+            System.out.println("usage: [input] [output]");
+            System.exit(-1);
+        }
+
+        Job job = getJob(args[0], args[1]);
+
+        job.submit();
+    }
+
+    /**
+     * Gets fully configured Job instance.
+     *
+     * @param input Input file name.
+     * @param output Output directory name.
+     * @return Job instance.
+     * @throws IOException If fails.
+     */
+    public static Job getJob(String input, String output) throws IOException {
+        Job job = Job.getInstance();
+
+        job.setOutputKeyClass(Text.class);
+        job.setOutputValueClass(IntWritable.class);
+
+        setTasksClasses(job, true, true, true, false);
+
+        FileInputFormat.setInputPaths(job, new Path(input));
+        FileOutputFormat.setOutputPath(job, new Path(output));
+
+        job.setJarByClass(HadoopWordCount2.class);
+
+        return job;
+    }
+
+    /**
+     * Sets task classes, with related info if needed, into the configuration object.
+     *
+     * @param job Configuration to change.
+     * @param setMapper Option to set mapper and input format classes.
+     * @param setCombiner Option to set combiner class.
+     * @param setReducer Option to set reducer and output format classes.
+     * @param outputCompression Option to enable compressed SequenceFile output.
+     */
+    public static void setTasksClasses(Job job, boolean setMapper, boolean setCombiner, boolean setReducer,
+            boolean outputCompression) {
+        if (setMapper) {
+            job.setMapperClass(HadoopWordCount2Mapper.class);
+            job.setInputFormatClass(TextInputFormat.class);
+        }
+
+        if (setCombiner)
+            job.setCombinerClass(HadoopWordCount2Combiner.class);
+
+        if (setReducer) {
+            job.setReducerClass(HadoopWordCount2Reducer.class);
+            job.setOutputFormatClass(TextOutputFormat.class);
+        }
+
+        if (outputCompression) {
+            job.setOutputFormatClass(SequenceFileOutputFormat.class);
+
+            SequenceFileOutputFormat.setOutputCompressionType(job, SequenceFile.CompressionType.BLOCK);
+
+            SequenceFileOutputFormat.setCompressOutput(job, true);
+
+            job.getConfiguration().set(FileOutputFormat.COMPRESS_CODEC, SnappyCodec.class.getName());
+        }
+    }
+}
\ No newline at end of file
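
When outputCompression is set, the job writes a block-compressed SequenceFile through Snappy. A sketch of a compressed run (paths illustrative; the fragment belongs in a method declaring throws Exception). Note that SnappyCodec requires the native Snappy library on every node, otherwise the job fails at runtime:

    Job job = Job.getInstance();

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    // Mapper, combiner and reducer, plus block-compressed SequenceFile output.
    HadoopWordCount2.setTasksClasses(job, true, true, true, true);

    FileInputFormat.setInputPaths(job, new Path("/input"));
    FileOutputFormat.setOutputPath(job, new Path("/out-compressed"));

    job.setJarByClass(HadoopWordCount2.class);

    job.waitForCompletion(true);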

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Combiner.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Combiner.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Combiner.java
new file mode 100644
index 0000000..0d25e3c
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Combiner.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ignite.internal.processors.hadoop.examples;
+
+import java.io.IOException;
+import org.apache.ignite.internal.processors.hadoop.HadoopErrorSimulator;
+
+/**
+ * Combiner function with pluggable error simulator.
+ */
+public class HadoopWordCount2Combiner extends HadoopWordCount2Reducer {
+    /** {@inheritDoc} */
+    @Override protected void configError() {
+        HadoopErrorSimulator.instance().onCombineConfigure();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void setupError() throws IOException, InterruptedException {
+        HadoopErrorSimulator.instance().onCombineSetup();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void reduceError() throws IOException, InterruptedException {
+        HadoopErrorSimulator.instance().onCombine();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void cleanupError() throws IOException, InterruptedException {
+        HadoopErrorSimulator.instance().onCombineCleanup();
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Mapper.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Mapper.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Mapper.java
new file mode 100644
index 0000000..76857e6
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Mapper.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.examples;
+
+import java.io.IOException;
+import java.util.StringTokenizer;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.ignite.internal.processors.hadoop.HadoopErrorSimulator;
+
+/**
+ * Mapper phase of WordCount job.
+ */
+public class HadoopWordCount2Mapper extends Mapper<Object, Text, Text, IntWritable> implements Configurable {
+    /** Writable container for writing the current word. */
+    private Text word = new Text();
+
+    /** Writable integer constant of '1', written as the count for each found word. */
+    private static final IntWritable one = new IntWritable(1);
+
+    /** Flag to check that the mapper was configured before running. */
+    private boolean wasConfigured;
+
+    /** Flag to check that the mapper was set up before running. */
+    private boolean wasSetUp;
+
+    /** {@inheritDoc} */
+    @Override public void map(Object key, Text val, Context ctx) throws IOException, InterruptedException {
+        assert wasConfigured : "Mapper should be configured";
+        assert wasSetUp : "Mapper should be set up";
+
+        StringTokenizer wordList = new StringTokenizer(val.toString());
+
+        while (wordList.hasMoreTokens()) {
+            word.set(wordList.nextToken());
+
+            ctx.write(word, one);
+        }
+
+        HadoopErrorSimulator.instance().onMap();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void setup(Context ctx) throws IOException, InterruptedException {
+        super.setup(ctx);
+
+        wasSetUp = true;
+
+        HadoopErrorSimulator.instance().onMapSetup();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void cleanup(Context ctx) throws IOException, InterruptedException {
+        super.cleanup(ctx);
+
+        HadoopErrorSimulator.instance().onMapCleanup();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setConf(Configuration conf) {
+        wasConfigured = true;
+
+        HadoopErrorSimulator.instance().onMapConfigure();
+    }
+
+    /** {@inheritDoc} */
+    @Override public Configuration getConf() {
+        return null;
+    }
+}
\ No newline at end of file
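
For context: the wasConfigured flag above works because Hadoop instantiates
mappers through ReflectionUtils, which calls setConf(...) on any object
implementing Configurable before the task runs. A minimal sketch of that
mechanism, assuming HadoopWordCount2Mapper is on the classpath; the demo
class itself is hypothetical, not part of this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.ReflectionUtils;
    import org.apache.ignite.internal.processors.hadoop.examples.HadoopWordCount2Mapper;

    public class ConfigurableDemo {
        public static void main(String[] args) {
            // newInstance() constructs the mapper and, because the mapper
            // implements Configurable, immediately invokes setConf(conf),
            // flipping wasConfigured to true before map() is ever called.
            HadoopWordCount2Mapper mapper = ReflectionUtils.newInstance(
                HadoopWordCount2Mapper.class, new Configuration());

            System.out.println("Instantiated and configured: " + mapper);
        }
    }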

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Reducer.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Reducer.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Reducer.java
new file mode 100644
index 0000000..e780170
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Reducer.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.examples;
+
+import java.io.IOException;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.ignite.internal.processors.hadoop.HadoopErrorSimulator;
+
+/**
+ * Combiner and reducer phases of the WordCount job.
+ */
+public class HadoopWordCount2Reducer extends Reducer<Text, IntWritable, Text, IntWritable> implements Configurable {
+    /** Writable container for the accumulated count of a word. */
+    private IntWritable totalWordCnt = new IntWritable();
+
+    /** Flag indicating that the reducer was configured before the run. */
+    private boolean wasConfigured;
+
+    /** Flag indicating that the reducer was set up before the run. */
+    private boolean wasSetUp;
+
+    /** {@inheritDoc} */
+    @Override public void reduce(Text key, Iterable<IntWritable> values, Context ctx) throws IOException, InterruptedException {
+        assert wasConfigured : "Reducer should be configured";
+        assert wasSetUp : "Reducer should be set up";
+
+        int wordCnt = 0;
+
+        for (IntWritable value : values)
+            wordCnt += value.get();
+
+        totalWordCnt.set(wordCnt);
+
+        ctx.write(key, totalWordCnt);
+
+        reduceError();
+    }
+
+    /**
+     * Simulates reduce error if needed.
+     */
+    protected void reduceError() throws IOException, InterruptedException {
+        HadoopErrorSimulator.instance().onReduce();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void setup(Context context) throws IOException, InterruptedException {
+        super.setup(context);
+
+        wasSetUp = true;
+
+        setupError();
+    }
+
+    /**
+     * Simulates setup error if needed.
+     */
+    protected void setupError() throws IOException, InterruptedException {
+        HadoopErrorSimulator.instance().onReduceSetup();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void cleanup(Context context) throws IOException, InterruptedException {
+        super.cleanup(context);
+
+        cleanupError();
+    }
+
+    /**
+     * Simulates cleanup error if needed.
+     */
+    protected void cleanupError() throws IOException, InterruptedException {
+        HadoopErrorSimulator.instance().onReduceCleanup();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setConf(Configuration conf) {
+        wasConfigured = true;
+
+        configError();
+    }
+
+    /**
+     * Simulates configuration error if needed.
+     */
+    protected void configError() {
+        HadoopErrorSimulator.instance().onReduceConfigure();
+    }
+
+    /** {@inheritDoc} */
+    @Override public Configuration getConf() {
+        return null;
+    }
+}
\ No newline at end of file
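
For context: a minimal driver sketch showing how the mapper and reducer from
this commit would typically be wired into a Hadoop job; the reducer doubles
as the combiner, matching its class javadoc. Only the mapper/reducer class
names come from this diff -- the driver class and the input/output paths are
hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
    import org.apache.ignite.internal.processors.hadoop.examples.HadoopWordCount2Mapper;
    import org.apache.ignite.internal.processors.hadoop.examples.HadoopWordCount2Reducer;

    public class WordCount2Driver {
        public static void main(String[] args) throws Exception {
            Job job = Job.getInstance(new Configuration(), "wordcount2");

            job.setJarByClass(WordCount2Driver.class);

            // Map, combine and reduce phases from this commit.
            job.setMapperClass(HadoopWordCount2Mapper.class);
            job.setCombinerClass(HadoopWordCount2Reducer.class);
            job.setReducerClass(HadoopWordCount2Reducer.class);

            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(IntWritable.class);

            FileInputFormat.addInputPath(job, new Path(args[0]));
            FileOutputFormat.setOutputPath(job, new Path(args[1]));

            System.exit(job.waitForCompletion(true) ? 0 : 1);
        }
    }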

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopAbstractMapTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopAbstractMapTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopAbstractMapTest.java
new file mode 100644
index 0000000..5266875
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopAbstractMapTest.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.shuffle.collections;
+
+import java.util.Comparator;
+import java.util.concurrent.Callable;
+import org.apache.commons.collections.comparators.ComparableComparator;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.internal.processors.hadoop.HadoopJob;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopPartitioner;
+import org.apache.ignite.internal.processors.hadoop.HadoopSerialization;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounter;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
+import org.apache.ignite.internal.processors.hadoop.v2.HadoopWritableSerialization;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Abstract base class for multimap tests.
+ */
+public abstract class HadoopAbstractMapTest extends GridCommonAbstractTest {
+    /**
+     * Test task context.
+     */
+    protected static class TaskContext extends HadoopTaskContext {
+        /**
+         * Constructor.
+         */
+        protected TaskContext() {
+            super(null, null);
+        }
+
+        /** {@inheritDoc} */
+        @Override public <T extends HadoopCounter> T counter(String grp, String name, Class<T> cls) {
+            return null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public HadoopCounters counters() {
+            return null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public HadoopPartitioner partitioner() throws IgniteCheckedException {
+            assert false;
+
+            return null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public HadoopSerialization keySerialization() throws IgniteCheckedException {
+            return new HadoopWritableSerialization(IntWritable.class);
+        }
+
+        /** {@inheritDoc} */
+        @Override public HadoopSerialization valueSerialization() throws IgniteCheckedException {
+            return new HadoopWritableSerialization(IntWritable.class);
+        }
+
+        /** {@inheritDoc} */
+        @SuppressWarnings("unchecked")
+        @Override public Comparator<Object> sortComparator() {
+            return ComparableComparator.getInstance();
+        }
+
+        /** {@inheritDoc} */
+        @SuppressWarnings("unchecked")
+        @Override public Comparator<Object> groupComparator() {
+            return ComparableComparator.getInstance();
+        }
+
+        /** {@inheritDoc} */
+        @Override public void run() throws IgniteCheckedException {
+            assert false;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void cancel() {
+            assert false;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void prepareTaskEnvironment() throws IgniteCheckedException {
+            assert false;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void cleanupTaskEnvironment() throws IgniteCheckedException {
+            assert false;
+        }
+
+        /** {@inheritDoc} */
+        @Override public <T> T runAsJobOwner(Callable<T> c) throws IgniteCheckedException {
+            try {
+                return c.call();
+            }
+            catch (Exception e) {
+                throw new IgniteCheckedException(e);
+            }
+        }
+    }
+
+    /**
+     * Test job info.
+     */
+    protected static class JobInfo implements HadoopJobInfo {
+        /** {@inheritDoc} */
+        @Nullable @Override public String property(String name) {
+            return null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean hasCombiner() {
+            assert false;
+
+            return false;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean hasReducer() {
+            assert false;
+
+            return false;
+        }
+
+        /** {@inheritDoc} */
+        @Override public HadoopJob createJob(Class<? extends HadoopJob> jobCls, HadoopJobId jobId, IgniteLogger log,
+            @Nullable String[] libNames) throws IgniteCheckedException {
+            assert false;
+
+            return null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public int reducers() {
+            assert false;
+
+            return 0;
+        }
+
+        /** {@inheritDoc} */
+        @Override public String jobName() {
+            assert false;
+
+            return null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public String user() {
+            assert false;
+
+            return null;
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopConcurrentHashMultimapSelftest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopConcurrentHashMultimapSelftest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopConcurrentHashMultimapSelftest.java
new file mode 100644
index 0000000..a37d74b
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopConcurrentHashMultimapSelftest.java
@@ -0,0 +1,278 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.shuffle.collections;
+
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.Multimap;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Deque;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ConcurrentMap;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
+import org.apache.ignite.internal.util.GridRandom;
+import org.apache.ignite.internal.util.GridUnsafe;
+import org.apache.ignite.internal.util.io.GridDataInput;
+import org.apache.ignite.internal.util.io.GridUnsafeDataInput;
+import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
+import org.apache.ignite.internal.util.typedef.X;
+
+/**
+ * Self test of {@link HadoopConcurrentHashMultimap}.
+ */
+public class HadoopConcurrentHashMultimapSelftest extends HadoopAbstractMapTest {
+    /**
+     * Test simple map.
+     *
+     * @throws Exception If failed.
+     */
+    public void testMapSimple() throws Exception {
+        GridUnsafeMemory mem = new GridUnsafeMemory(0);
+
+//        mem.listen(new GridOffHeapEventListener() {
+//            @Override public void onEvent(GridOffHeapEvent evt) {
+//                if (evt == GridOffHeapEvent.ALLOCATE)
+//                    U.dumpStack();
+//            }
+//        });
+
+        Random rnd = new Random();
+
+        int mapSize = 16 << rnd.nextInt(3);
+
+        HadoopJobInfo job = new JobInfo();
+
+        HadoopTaskContext taskCtx = new TaskContext();
+
+        HadoopConcurrentHashMultimap m = new HadoopConcurrentHashMultimap(job, mem, mapSize);
+
+        HadoopConcurrentHashMultimap.Adder a = m.startAdding(taskCtx);
+
+        Multimap<Integer, Integer> mm = ArrayListMultimap.create();
+        Multimap<Integer, Integer> vis = ArrayListMultimap.create();
+
+        for (int i = 0, vals = 4 * mapSize + rnd.nextInt(25); i < vals; i++) {
+            int key = rnd.nextInt(mapSize);
+            int val = rnd.nextInt();
+
+            a.write(new IntWritable(key), new IntWritable(val));
+            mm.put(key, val);
+
+            X.println("k: " + key + " v: " + val);
+
+            a.close();
+
+            check(m, mm, vis, taskCtx);
+
+            a = m.startAdding(taskCtx);
+        }
+
+//        a.add(new IntWritable(10), new IntWritable(2));
+//        mm.put(10, 2);
+//        check(m, mm);
+
+        a.close();
+
+        X.println("Alloc: " + mem.allocatedSize());
+
+        m.close();
+
+        assertEquals(0, mem.allocatedSize());
+    }
+
+    /**
+     * Checks multimap contents against the expected mapping and verifies the visitor.
+     *
+     * @param m Multimap to check.
+     * @param mm Expected mapping.
+     * @param vis Multimap to collect visited entries.
+     * @param taskCtx Task context.
+     * @throws Exception If failed.
+     */
+    private void check(HadoopConcurrentHashMultimap m, Multimap<Integer, Integer> mm,
+        final Multimap<Integer, Integer> vis, HadoopTaskContext taskCtx) throws Exception {
+        final HadoopTaskInput in = m.input(taskCtx);
+
+        Map<Integer, Collection<Integer>> mmm = mm.asMap();
+
+        int keys = 0;
+
+        while (in.next()) {
+            keys++;
+
+            IntWritable k = (IntWritable)in.key();
+
+            assertNotNull(k);
+
+            Deque<Integer> vs = new LinkedList<>();
+
+            Iterator<?> it = in.values();
+
+            while (it.hasNext())
+                vs.addFirst(((IntWritable) it.next()).get());
+
+            Collection<Integer> exp = mmm.get(k.get());
+
+            assertEquals(exp, vs);
+        }
+
+        assertEquals(mmm.size(), keys);
+
+        assertEquals(m.keys(), keys);
+
+        X.println("keys: " + keys + " cap: " + m.capacity());
+
+        // Check visitor.
+
+        final byte[] buf = new byte[4];
+
+        final GridDataInput dataInput = new GridUnsafeDataInput();
+
+        m.visit(false, new HadoopConcurrentHashMultimap.Visitor() {
+            /** */
+            IntWritable key = new IntWritable();
+
+            /** */
+            IntWritable val = new IntWritable();
+
+            @Override public void onKey(long keyPtr, int keySize) {
+                read(keyPtr, keySize, key);
+            }
+
+            @Override public void onValue(long valPtr, int valSize) {
+                read(valPtr, valSize, val);
+
+                vis.put(key.get(), val.get());
+            }
+
+            private void read(long ptr, int size, Writable w) {
+                assert size == 4 : size;
+
+                GridUnsafe.copyMemory(null, ptr, buf, GridUnsafe.BYTE_ARR_OFF, size);
+
+                dataInput.bytes(buf, size);
+
+                try {
+                    w.readFields(dataInput);
+                }
+                catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
+            }
+        });
+
+//        X.println("vis: " + vis);
+
+        assertEquals(mm, vis);
+
+        in.close();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testMultiThreaded() throws Exception {
+        GridUnsafeMemory mem = new GridUnsafeMemory(0);
+
+        X.println("___ Started");
+
+        Random rnd = new GridRandom();
+
+        for (int i = 0; i < 20; i++) {
+            HadoopJobInfo job = new JobInfo();
+
+            final HadoopTaskContext taskCtx = new TaskContext();
+
+            final HadoopConcurrentHashMultimap m = new HadoopConcurrentHashMultimap(job, mem, 16);
+
+            final ConcurrentMap<Integer, Collection<Integer>> mm = new ConcurrentHashMap<>();
+
+            X.println("___ MT");
+
+            multithreaded(new Callable<Object>() {
+                @Override public Object call() throws Exception {
+                    X.println("___ TH in");
+
+                    Random rnd = new GridRandom();
+
+                    IntWritable key = new IntWritable();
+                    IntWritable val = new IntWritable();
+
+                    HadoopMultimap.Adder a = m.startAdding(taskCtx);
+
+                    for (int i = 0; i < 50000; i++) {
+                        int k = rnd.nextInt(32000);
+                        int v = rnd.nextInt();
+
+                        key.set(k);
+                        val.set(v);
+
+                        a.write(key, val);
+
+                        Collection<Integer> list = mm.get(k);
+
+                        if (list == null) {
+                            list = new ConcurrentLinkedQueue<>();
+
+                            Collection<Integer> old = mm.putIfAbsent(k, list);
+
+                            if (old != null)
+                                list = old;
+                        }
+
+                        list.add(v);
+                    }
+
+                    a.close();
+
+                    X.println("___ TH out");
+
+                    return null;
+                }
+            }, 3 + rnd.nextInt(27));
+
+            X.println("___ Check: " + m.capacity());
+
+            assertEquals(mm.size(), m.keys());
+
+            assertTrue(m.capacity() > 32000);
+
+            HadoopTaskInput in = m.input(taskCtx);
+
+            while (in.next()) {
+                IntWritable key = (IntWritable) in.key();
+
+                Iterator<?> valsIter = in.values();
+
+                Collection<Integer> vals = mm.remove(key.get());
+
+                assertNotNull(vals);
+
+                while (valsIter.hasNext()) {
+                    IntWritable val = (IntWritable) valsIter.next();
+
+                    assertTrue(vals.remove(val.get()));
+                }
+
+                assertTrue(vals.isEmpty());
+            }
+
+            in.close();
+            m.close();
+
+            assertEquals(0, mem.allocatedSize());
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMapSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMapSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMapSelfTest.java
new file mode 100644
index 0000000..04585ec
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMapSelfTest.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.shuffle.collections;
+
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.Multimap;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
+import org.apache.ignite.internal.util.GridLongList;
+import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
+import org.apache.ignite.internal.util.typedef.X;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Random;
+
+/**
+ * Self test of {@link HadoopHashMultimap}.
+ */
+public class HadoopHashMapSelfTest extends HadoopAbstractMapTest {
+    /**
+     * Test simple map.
+     *
+     * @throws Exception If failed.
+     */
+    public void testMapSimple() throws Exception {
+        GridUnsafeMemory mem = new GridUnsafeMemory(0);
+
+        Random rnd = new Random();
+
+        int mapSize = 16 << rnd.nextInt(3);
+
+        HadoopTaskContext taskCtx = new TaskContext();
+
+        final HadoopHashMultimap m = new HadoopHashMultimap(new JobInfo(), mem, mapSize);
+
+        HadoopMultimap.Adder a = m.startAdding(taskCtx);
+
+        Multimap<Integer, Integer> mm = ArrayListMultimap.create();
+
+        for (int i = 0, vals = 4 * mapSize + rnd.nextInt(25); i < vals; i++) {
+            int key = rnd.nextInt(mapSize);
+            int val = rnd.nextInt();
+
+            a.write(new IntWritable(key), new IntWritable(val));
+            mm.put(key, val);
+
+            X.println("k: " + key + " v: " + val);
+
+            a.close();
+
+            check(m, mm, taskCtx);
+
+            a = m.startAdding(taskCtx);
+        }
+
+//        a.add(new IntWritable(10), new IntWritable(2));
+//        mm.put(10, 2);
+//        check(m, mm);
+
+        a.close();
+
+        X.println("Alloc: " + mem.allocatedSize());
+
+        m.close();
+
+        assertEquals(0, mem.allocatedSize());
+    }
+
+    /**
+     * Checks multimap contents against the expected mapping.
+     *
+     * @param m Multimap to check.
+     * @param mm Expected mapping.
+     * @param taskCtx Task context.
+     * @throws Exception If failed.
+     */
+    private void check(HadoopHashMultimap m, Multimap<Integer, Integer> mm, HadoopTaskContext taskCtx) throws Exception {
+        final HadoopTaskInput in = m.input(taskCtx);
+
+        Map<Integer, Collection<Integer>> mmm = mm.asMap();
+
+        int keys = 0;
+
+        while (in.next()) {
+            keys++;
+
+            IntWritable k = (IntWritable)in.key();
+
+            assertNotNull(k);
+
+            ArrayList<Integer> vs = new ArrayList<>();
+
+            Iterator<?> it = in.values();
+
+            while (it.hasNext())
+                vs.add(((IntWritable) it.next()).get());
+
+            Collection<Integer> exp = mmm.get(k.get());
+
+            assertEquals(sorted(exp), sorted(vs));
+        }
+
+        X.println("keys: " + keys + " cap: " + m.capacity());
+
+        assertEquals(mmm.size(), keys);
+
+        assertEquals(m.keys(), keys);
+
+        in.close();
+    }
+
+    /**
+     * @param col Collection of integers.
+     * @return Sorted long list of the same values.
+     */
+    private GridLongList sorted(Collection<Integer> col) {
+        GridLongList lst = new GridLongList(col.size());
+
+        for (Integer i : col)
+            lst.add(i);
+
+        return lst.sort();
+    }
+}
\ No newline at end of file
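
For context: both self tests above exercise the same multimap lifecycle
(startAdding -> write -> close -> input -> close -> close the map). A
distilled sketch of that contract, reusing only names that appear in this
diff; this is a fragment for a test method body, with imports and the
JobInfo/TaskContext stubs taken from HadoopAbstractMapTest:

    GridUnsafeMemory mem = new GridUnsafeMemory(0);    // off-heap arena
    HadoopTaskContext taskCtx = new TaskContext();     // stub context

    HadoopConcurrentHashMultimap m =
        new HadoopConcurrentHashMultimap(new JobInfo(), mem, 16);

    HadoopMultimap.Adder a = m.startAdding(taskCtx);   // open an adder
    a.write(new IntWritable(1), new IntWritable(42));  // append 42 under key 1
    a.close();                                         // flush before reading

    HadoopTaskInput in = m.input(taskCtx);             // iterate keys/values
    while (in.next())
        X.println("key: " + in.key());
    in.close();

    m.close();                                         // release off-heap memory

    assert mem.allocatedSize() == 0;                   // nothing may leak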


http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobTracker.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobTracker.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobTracker.java
deleted file mode 100644
index f3e17f3..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobTracker.java
+++ /dev/null
@@ -1,1706 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.jobtracker;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.atomic.AtomicInteger;
-import javax.cache.event.CacheEntryEvent;
-import javax.cache.event.CacheEntryUpdatedListener;
-import javax.cache.expiry.Duration;
-import javax.cache.expiry.ExpiryPolicy;
-import javax.cache.expiry.ModifiedExpiryPolicy;
-import javax.cache.processor.EntryProcessor;
-import javax.cache.processor.MutableEntry;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.events.DiscoveryEvent;
-import org.apache.ignite.events.Event;
-import org.apache.ignite.events.EventType;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener;
-import org.apache.ignite.internal.processors.cache.GridCacheAdapter;
-import org.apache.ignite.internal.processors.cache.IgniteInternalCache;
-import org.apache.ignite.internal.processors.hadoop.HadoopClassLoader;
-import org.apache.ignite.internal.processors.hadoop.HadoopComponent;
-import org.apache.ignite.internal.processors.hadoop.HadoopContext;
-import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
-import org.apache.ignite.internal.processors.hadoop.HadoopJob;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobPhase;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobStatus;
-import org.apache.ignite.internal.processors.hadoop.HadoopMapReducePlan;
-import org.apache.ignite.internal.processors.hadoop.HadoopMapReducePlanner;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounterWriter;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopCountersImpl;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopPerformanceCounter;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskStatus;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;
-import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2Job;
-import org.apache.ignite.internal.util.GridMutex;
-import org.apache.ignite.internal.util.GridSpinReadWriteLock;
-import org.apache.ignite.internal.util.future.GridFinishedFuture;
-import org.apache.ignite.internal.util.future.GridFutureAdapter;
-import org.apache.ignite.internal.util.typedef.CI1;
-import org.apache.ignite.internal.util.typedef.CIX1;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.internal.CU;
-import org.apache.ignite.internal.util.typedef.internal.SB;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lang.IgniteInClosure;
-import org.jetbrains.annotations.Nullable;
-import org.jsr166.ConcurrentHashMap8;
-
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
-import static org.apache.ignite.internal.processors.hadoop.HadoopJobPhase.PHASE_CANCELLING;
-import static org.apache.ignite.internal.processors.hadoop.HadoopJobPhase.PHASE_COMPLETE;
-import static org.apache.ignite.internal.processors.hadoop.HadoopJobPhase.PHASE_MAP;
-import static org.apache.ignite.internal.processors.hadoop.HadoopJobPhase.PHASE_REDUCE;
-import static org.apache.ignite.internal.processors.hadoop.HadoopJobPhase.PHASE_SETUP;
-import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.ABORT;
-import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.COMMIT;
-import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.MAP;
-import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.REDUCE;
-import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.SETUP;
-import static org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskState.COMPLETED;
-import static org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskState.CRASHED;
-import static org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskState.FAILED;
-import static org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskState.RUNNING;
-
-/**
- * Hadoop job tracker.
- */
-public class HadoopJobTracker extends HadoopComponent {
-    /** */
-    private final GridMutex mux = new GridMutex();
-
-    /** */
-    private volatile IgniteInternalCache<HadoopJobId, HadoopJobMetadata> jobMetaPrj;
-
-    /** Projection with expiry policy for finished job updates. */
-    private volatile IgniteInternalCache<HadoopJobId, HadoopJobMetadata> finishedJobMetaPrj;
-
-    /** Map-reduce execution planner. */
-    @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
-    private HadoopMapReducePlanner mrPlanner;
-
-    /** All the known jobs. */
-    private final ConcurrentMap<HadoopJobId, GridFutureAdapter<HadoopJob>> jobs = new ConcurrentHashMap8<>();
-
-    /** Locally active jobs. */
-    private final ConcurrentMap<HadoopJobId, JobLocalState> activeJobs = new ConcurrentHashMap8<>();
-
-    /** Locally requested finish futures. */
-    private final ConcurrentMap<HadoopJobId, GridFutureAdapter<HadoopJobId>> activeFinishFuts =
-        new ConcurrentHashMap8<>();
-
-    /** Event processing service. */
-    private ExecutorService evtProcSvc;
-
-    /** Component busy lock. */
-    private GridSpinReadWriteLock busyLock;
-
-    /** Class to create HadoopJob instances from. */
-    private Class<? extends HadoopJob> jobCls;
-
-    /** Closure to check the result of an async transform of the system cache. */
-    private final IgniteInClosure<IgniteInternalFuture<?>> failsLog = new CI1<IgniteInternalFuture<?>>() {
-        @Override public void apply(IgniteInternalFuture<?> gridFut) {
-            try {
-                gridFut.get();
-            }
-            catch (IgniteCheckedException e) {
-                U.error(log, "Failed to transform system cache.", e);
-            }
-        }
-    };
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override public void start(final HadoopContext ctx) throws IgniteCheckedException {
-        super.start(ctx);
-
-        busyLock = new GridSpinReadWriteLock();
-
-        evtProcSvc = Executors.newFixedThreadPool(1);
-
-        UUID nodeId = ctx.localNodeId();
-
-        assert jobCls == null;
-
-        String[] libNames = null;
-
-        if (ctx.configuration() != null)
-            libNames = ctx.configuration().getNativeLibraryNames();
-
-        HadoopClassLoader ldr = new HadoopClassLoader(null, HadoopClassLoader.nameForJob(nodeId), libNames);
-
-        try {
-            jobCls = (Class<HadoopV2Job>)ldr.loadClass(HadoopV2Job.class.getName());
-        }
-        catch (Exception ioe) {
-            throw new IgniteCheckedException("Failed to load job class [class="
-                + HadoopV2Job.class.getName() + ']', ioe);
-        }
-    }
-
-    /**
-     * @return Job meta projection.
-     */
-    @SuppressWarnings("NonPrivateFieldAccessedInSynchronizedContext")
-    private IgniteInternalCache<HadoopJobId, HadoopJobMetadata> jobMetaCache() {
-        IgniteInternalCache<HadoopJobId, HadoopJobMetadata> prj = jobMetaPrj;
-
-        if (prj == null) {
-            synchronized (mux) {
-                if ((prj = jobMetaPrj) == null) {
-                    GridCacheAdapter<HadoopJobId, HadoopJobMetadata> sysCache = ctx.kernalContext().cache()
-                        .internalCache(CU.SYS_CACHE_HADOOP_MR);
-
-                    assert sysCache != null;
-
-                    mrPlanner = ctx.planner();
-
-                    try {
-                        ctx.kernalContext().resource().injectGeneric(mrPlanner);
-                    }
-                    catch (IgniteCheckedException e) { // Must not happen.
-                        U.error(log, "Failed to inject resources.", e);
-
-                        throw new IllegalStateException(e);
-                    }
-
-                    jobMetaPrj = prj = sysCache;
-
-                    if (ctx.configuration().getFinishedJobInfoTtl() > 0) {
-                        ExpiryPolicy finishedJobPlc = new ModifiedExpiryPolicy(
-                            new Duration(MILLISECONDS, ctx.configuration().getFinishedJobInfoTtl()));
-
-                        finishedJobMetaPrj = prj.withExpiryPolicy(finishedJobPlc);
-                    }
-                    else
-                        finishedJobMetaPrj = jobMetaPrj;
-                }
-            }
-        }
-
-        return prj;
-    }
-
-    /**
-     * @return Projection with expiry policy for finished job updates.
-     */
-    private IgniteInternalCache<HadoopJobId, HadoopJobMetadata> finishedJobMetaCache() {
-        IgniteInternalCache<HadoopJobId, HadoopJobMetadata> prj = finishedJobMetaPrj;
-
-        if (prj == null) {
-            jobMetaCache();
-
-            prj = finishedJobMetaPrj;
-
-            assert prj != null;
-        }
-
-        return prj;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("deprecation")
-    @Override public void onKernalStart() throws IgniteCheckedException {
-        super.onKernalStart();
-
-        jobMetaCache().context().continuousQueries().executeInternalQuery(
-            new CacheEntryUpdatedListener<HadoopJobId, HadoopJobMetadata>() {
-                @Override public void onUpdated(final Iterable<CacheEntryEvent<? extends HadoopJobId,
-                    ? extends HadoopJobMetadata>> evts) {
-                    if (!busyLock.tryReadLock())
-                        return;
-
-                    try {
-                        // Must process query callback in a separate thread to avoid deadlocks.
-                        evtProcSvc.submit(new EventHandler() {
-                            @Override protected void body() throws IgniteCheckedException {
-                                processJobMetadataUpdates(evts);
-                            }
-                        });
-                    }
-                    finally {
-                        busyLock.readUnlock();
-                    }
-                }
-            },
-            null,
-            true,
-            true,
-            false
-        );
-
-        ctx.kernalContext().event().addLocalEventListener(new GridLocalEventListener() {
-            @Override public void onEvent(final Event evt) {
-                if (!busyLock.tryReadLock())
-                    return;
-
-                try {
-                    // Must process discovery callback in a separate thread to avoid deadlock.
-                    evtProcSvc.submit(new EventHandler() {
-                        @Override protected void body() {
-                            processNodeLeft((DiscoveryEvent)evt);
-                        }
-                    });
-                }
-                finally {
-                    busyLock.readUnlock();
-                }
-            }
-        }, EventType.EVT_NODE_FAILED, EventType.EVT_NODE_LEFT);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onKernalStop(boolean cancel) {
-        super.onKernalStop(cancel);
-
-        busyLock.writeLock();
-
-        evtProcSvc.shutdown();
-
-        // Fail all pending futures.
-        for (GridFutureAdapter<HadoopJobId> fut : activeFinishFuts.values())
-            fut.onDone(new IgniteCheckedException("Failed to execute Hadoop map-reduce job (grid is stopping)."));
-    }
-
-    /**
-     * Submits execution of Hadoop job to grid.
-     *
-     * @param jobId Job ID.
-     * @param info Job info.
-     * @return Job completion future.
-     */
-    @SuppressWarnings("unchecked")
-    public IgniteInternalFuture<HadoopJobId> submit(HadoopJobId jobId, HadoopJobInfo info) {
-        if (!busyLock.tryReadLock()) {
-            return new GridFinishedFuture<>(new IgniteCheckedException("Failed to execute map-reduce job " +
-                "(grid is stopping): " + info));
-        }
-
-        try {
-            long jobPrepare = U.currentTimeMillis();
-
-            if (jobs.containsKey(jobId) || jobMetaCache().containsKey(jobId))
-                throw new IgniteCheckedException("Failed to submit job. Job with the same ID already exists: " + jobId);
-
-            HadoopJob job = job(jobId, info);
-
-            HadoopMapReducePlan mrPlan = mrPlanner.preparePlan(job, ctx.nodes(), null);
-
-            HadoopJobMetadata meta = new HadoopJobMetadata(ctx.localNodeId(), jobId, info);
-
-            meta.mapReducePlan(mrPlan);
-
-            meta.pendingSplits(allSplits(mrPlan));
-            meta.pendingReducers(allReducers(mrPlan));
-
-            GridFutureAdapter<HadoopJobId> completeFut = new GridFutureAdapter<>();
-
-            GridFutureAdapter<HadoopJobId> old = activeFinishFuts.put(jobId, completeFut);
-
-            assert old == null : "Duplicate completion future [jobId=" + jobId + ", old=" + old + ']';
-
-            if (log.isDebugEnabled())
-                log.debug("Submitting job metadata [jobId=" + jobId + ", meta=" + meta + ']');
-
-            long jobStart = U.currentTimeMillis();
-
-            HadoopPerformanceCounter perfCntr = HadoopPerformanceCounter.getCounter(meta.counters(),
-                ctx.localNodeId());
-
-            perfCntr.clientSubmissionEvents(info);
-            perfCntr.onJobPrepare(jobPrepare);
-            perfCntr.onJobStart(jobStart);
-
-            if (jobMetaCache().getAndPutIfAbsent(jobId, meta) != null)
-                throw new IgniteCheckedException("Failed to submit job. Job with the same ID already exists: " + jobId);
-
-            return completeFut;
-        }
-        catch (IgniteCheckedException e) {
-            U.error(log, "Failed to submit job: " + jobId, e);
-
-            return new GridFinishedFuture<>(e);
-        }
-        finally {
-            busyLock.readUnlock();
-        }
-    }
-
-    /**
-     * Convert Hadoop job metadata to job status.
-     *
-     * @param meta Metadata.
-     * @return Status.
-     */
-    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
-    public static HadoopJobStatus status(HadoopJobMetadata meta) {
-        HadoopJobInfo jobInfo = meta.jobInfo();
-
-        return new HadoopJobStatus(
-            meta.jobId(),
-            jobInfo.jobName(),
-            jobInfo.user(),
-            meta.pendingSplits() != null ? meta.pendingSplits().size() : 0,
-            meta.pendingReducers() != null ? meta.pendingReducers().size() : 0,
-            meta.mapReducePlan().mappers(),
-            meta.mapReducePlan().reducers(),
-            meta.phase(),
-            meta.failCause() != null,
-            meta.version()
-        );
-    }
-
-    /**
- * Gets Hadoop job status for the given job ID.
-     *
-     * @param jobId Job ID to get status for.
-     * @return Job status for given job ID or {@code null} if job was not found.
-     */
-    @Nullable public HadoopJobStatus status(HadoopJobId jobId) throws IgniteCheckedException {
-        if (!busyLock.tryReadLock())
-            return null; // Grid is stopping.
-
-        try {
-            HadoopJobMetadata meta = jobMetaCache().get(jobId);
-
-            return meta != null ? status(meta) : null;
-        }
-        finally {
-            busyLock.readUnlock();
-        }
-    }
-
-    /**
-     * Gets job finish future.
-     *
-     * @param jobId Job ID.
-     * @return Finish future or {@code null}.
-     * @throws IgniteCheckedException If failed.
-     */
-    @Nullable public IgniteInternalFuture<?> finishFuture(HadoopJobId jobId) throws IgniteCheckedException {
-        if (!busyLock.tryReadLock())
-            return null; // Grid is stopping.
-
-        try {
-            HadoopJobMetadata meta = jobMetaCache().get(jobId);
-
-            if (meta == null)
-                return null;
-
-            if (log.isTraceEnabled())
-                log.trace("Got job metadata for status check [locNodeId=" + ctx.localNodeId() + ", meta=" + meta + ']');
-
-            if (meta.phase() == PHASE_COMPLETE) {
-                if (log.isTraceEnabled())
-                    log.trace("Job is complete, returning finished future: " + jobId);
-
-                return new GridFinishedFuture<>(jobId);
-            }
-
-            GridFutureAdapter<HadoopJobId> fut = F.addIfAbsent(activeFinishFuts, jobId,
-                new GridFutureAdapter<HadoopJobId>());
-
-            // Get meta from cache one more time to close the race window.
-            meta = jobMetaCache().get(jobId);
-
-            if (log.isTraceEnabled())
-                log.trace("Re-checking job metadata [locNodeId=" + ctx.localNodeId() + ", meta=" + meta + ']');
-
-            if (meta == null) {
-                fut.onDone();
-
-                activeFinishFuts.remove(jobId, fut);
-            }
-            else if (meta.phase() == PHASE_COMPLETE) {
-                fut.onDone(jobId, meta.failCause());
-
-                activeFinishFuts.remove(jobId, fut);
-            }
-
-            return fut;
-        }
-        finally {
-            busyLock.readUnlock();
-        }
-    }
-
-    /**
-     * Gets job plan by job ID.
-     *
-     * @param jobId Job ID.
-     * @return Job plan.
-     * @throws IgniteCheckedException If failed.
-     */
-    public HadoopMapReducePlan plan(HadoopJobId jobId) throws IgniteCheckedException {
-        if (!busyLock.tryReadLock())
-            return null;
-
-        try {
-            HadoopJobMetadata meta = jobMetaCache().get(jobId);
-
-            if (meta != null)
-                return meta.mapReducePlan();
-
-            return null;
-        }
-        finally {
-            busyLock.readUnlock();
-        }
-    }
-
-    /**
- * Callback from task executor invoked when a task has finished.
-     *
-     * @param info Task info.
-     * @param status Task status.
-     */
-    @SuppressWarnings({"ConstantConditions", "ThrowableResultOfMethodCallIgnored"})
-    public void onTaskFinished(HadoopTaskInfo info, HadoopTaskStatus status) {
-        if (!busyLock.tryReadLock())
-            return;
-
-        try {
-            assert status.state() != RUNNING;
-
-            if (log.isDebugEnabled())
-                log.debug("Received task finished callback [info=" + info + ", status=" + status + ']');
-
-            JobLocalState state = activeJobs.get(info.jobId());
-
-            // Task CRASHes with null fail cause.
-            assert (status.state() != FAILED) || status.failCause() != null :
-                "Invalid task status [info=" + info + ", status=" + status + ']';
-
-            assert state != null || (ctx.jobUpdateLeader() && (info.type() == COMMIT || info.type() == ABORT)):
-                "Missing local state for finished task [info=" + info + ", status=" + status + ']';
-
-            StackedProcessor incrCntrs = null;
-
-            if (status.state() == COMPLETED)
-                incrCntrs = new IncrementCountersProcessor(null, status.counters());
-
-            switch (info.type()) {
-                case SETUP: {
-                    state.onSetupFinished(info, status, incrCntrs);
-
-                    break;
-                }
-
-                case MAP: {
-                    state.onMapFinished(info, status, incrCntrs);
-
-                    break;
-                }
-
-                case REDUCE: {
-                    state.onReduceFinished(info, status, incrCntrs);
-
-                    break;
-                }
-
-                case COMBINE: {
-                    state.onCombineFinished(info, status, incrCntrs);
-
-                    break;
-                }
-
-                case COMMIT:
-                case ABORT: {
-                    IgniteInternalCache<HadoopJobId, HadoopJobMetadata> cache = finishedJobMetaCache();
-
-                    cache.invokeAsync(info.jobId(), new UpdatePhaseProcessor(incrCntrs, PHASE_COMPLETE)).
-                        listen(failsLog);
-
-                    break;
-                }
-            }
-        }
-        finally {
-            busyLock.readUnlock();
-        }
-    }
-
-    /**
-     * @param jobId Job id.
-     * @param c Closure of operation.
-     */
-    private void transform(HadoopJobId jobId, EntryProcessor<HadoopJobId, HadoopJobMetadata, Void> c) {
-        jobMetaCache().invokeAsync(jobId, c).listen(failsLog);
-    }
-
-    /**
- * Callback from task executor invoked when the process is ready to receive shuffle messages.
-     *
-     * @param jobId Job ID.
-     * @param reducers Reducers.
-     * @param desc Process descriptor.
-     */
-    public void onExternalMappersInitialized(HadoopJobId jobId, Collection<Integer> reducers,
-        HadoopProcessDescriptor desc) {
-        transform(jobId, new InitializeReducersProcessor(null, reducers, desc));
-    }
-
-    /**
- * Gets all input splits for the given Hadoop map-reduce plan.
-     *
-     * @param plan Map-reduce plan.
-     * @return Collection of all input splits that should be processed.
-     */
-    @SuppressWarnings("ConstantConditions")
-    private Map<HadoopInputSplit, Integer> allSplits(HadoopMapReducePlan plan) {
-        Map<HadoopInputSplit, Integer> res = new HashMap<>();
-
-        int taskNum = 0;
-
-        for (UUID nodeId : plan.mapperNodeIds()) {
-            for (HadoopInputSplit split : plan.mappers(nodeId)) {
-                if (res.put(split, taskNum++) != null)
-                    throw new IllegalStateException("Split duplicate.");
-            }
-        }
-
-        return res;
-    }
-
-    /**
-     * Gets all reducers for this job.
-     *
-     * @param plan Map-reduce plan.
-     * @return Collection of reducers.
-     */
-    private Collection<Integer> allReducers(HadoopMapReducePlan plan) {
-        Collection<Integer> res = new HashSet<>();
-
-        for (int i = 0; i < plan.reducers(); i++)
-            res.add(i);
-
-        return res;
-    }
-
-    /**
-     * Processes node leave (or fail) event.
-     *
-     * @param evt Discovery event.
-     */
-    @SuppressWarnings("ConstantConditions")
-    private void processNodeLeft(DiscoveryEvent evt) {
-        if (log.isDebugEnabled())
-            log.debug("Processing discovery event [locNodeId=" + ctx.localNodeId() + ", evt=" + evt + ']');
-
-        // Check only if this node is responsible for job status updates.
-        if (ctx.jobUpdateLeader()) {
-            boolean checkSetup = evt.eventNode().order() < ctx.localNodeOrder();
-
-            // Iteration over all local entries is correct since system cache is REPLICATED.
-            for (Object metaObj : jobMetaCache().values()) {
-                HadoopJobMetadata meta = (HadoopJobMetadata)metaObj;
-
-                HadoopJobId jobId = meta.jobId();
-
-                HadoopMapReducePlan plan = meta.mapReducePlan();
-
-                HadoopJobPhase phase = meta.phase();
-
-                try {
-                    if (checkSetup && phase == PHASE_SETUP && !activeJobs.containsKey(jobId)) {
-                        // Failover setup task.
-                        HadoopJob job = job(jobId, meta.jobInfo());
-
-                        Collection<HadoopTaskInfo> setupTask = setupTask(jobId);
-
-                        assert setupTask != null;
-
-                        ctx.taskExecutor().run(job, setupTask);
-                    }
-                    else if (phase == PHASE_MAP || phase == PHASE_REDUCE) {
-                        // Must check all nodes, even those that do not match the event node ID,
-                        // since multiple nodes may have failed.
-                        Collection<HadoopInputSplit> cancelSplits = null;
-
-                        for (UUID nodeId : plan.mapperNodeIds()) {
-                            if (ctx.kernalContext().discovery().node(nodeId) == null) {
-                                // Node has left the grid.
-                                Collection<HadoopInputSplit> mappers = plan.mappers(nodeId);
-
-                                if (cancelSplits == null)
-                                    cancelSplits = new HashSet<>();
-
-                                cancelSplits.addAll(mappers);
-                            }
-                        }
-
-                        Collection<Integer> cancelReducers = null;
-
-                        for (UUID nodeId : plan.reducerNodeIds()) {
-                            if (ctx.kernalContext().discovery().node(nodeId) == null) {
-                                // Node has left the grid.
-                                int[] reducers = plan.reducers(nodeId);
-
-                                if (cancelReducers == null)
-                                    cancelReducers = new HashSet<>();
-
-                                for (int rdc : reducers)
-                                    cancelReducers.add(rdc);
-                            }
-                        }
-
-                        if (cancelSplits != null || cancelReducers != null)
-                            jobMetaCache().invoke(meta.jobId(), new CancelJobProcessor(null, new IgniteCheckedException(
-                                "One or more nodes participating in map-reduce job execution failed."), cancelSplits,
-                                cancelReducers));
-                    }
-                }
-                catch (IgniteCheckedException e) {
-                    U.error(log, "Failed to cancel job: " + meta, e);
-                }
-            }
-        }
-    }
-
-    /**
-     * @param updated Updated cache entries.
-     * @throws IgniteCheckedException If failed.
-     */
-    private void processJobMetadataUpdates(
-        Iterable<CacheEntryEvent<? extends HadoopJobId, ? extends HadoopJobMetadata>> updated)
-        throws IgniteCheckedException {
-        UUID locNodeId = ctx.localNodeId();
-
-        for (CacheEntryEvent<? extends HadoopJobId, ? extends HadoopJobMetadata> entry : updated) {
-            HadoopJobId jobId = entry.getKey();
-            HadoopJobMetadata meta = entry.getValue();
-
-            if (meta == null || !ctx.isParticipating(meta))
-                continue;
-
-            if (log.isDebugEnabled())
-                log.debug("Processing job metadata update callback [locNodeId=" + locNodeId +
-                    ", meta=" + meta + ']');
-
-            try {
-                ctx.taskExecutor().onJobStateChanged(meta);
-            }
-            catch (IgniteCheckedException e) {
-                U.error(log, "Failed to process job state changed callback (will fail the job) " +
-                    "[locNodeId=" + locNodeId + ", jobId=" + jobId + ", meta=" + meta + ']', e);
-
-                transform(jobId, new CancelJobProcessor(null, e));
-
-                continue;
-            }
-
-            processJobMetaUpdate(jobId, meta, locNodeId);
-        }
-    }
-
-    /**
-     * @param jobId  Job ID.
-     * @param plan Map-reduce plan.
-     */
-    @SuppressWarnings({"unused", "ConstantConditions" })
-    private void printPlan(HadoopJobId jobId, HadoopMapReducePlan plan) {
-        log.info("Plan for " + jobId);
-
-        SB b = new SB();
-
-        b.a("   Map: ");
-
-        for (UUID nodeId : plan.mapperNodeIds())
-            b.a(nodeId).a("=").a(plan.mappers(nodeId).size()).a(' ');
-
-        log.info(b.toString());
-
-        b = new SB();
-
-        b.a("   Reduce: ");
-
-        for (UUID nodeId : plan.reducerNodeIds())
-            b.a(nodeId).a("=").a(Arrays.toString(plan.reducers(nodeId))).a(' ');
-
-        log.info(b.toString());
-    }
-
-    /**
-     * @param jobId Job ID.
-     * @param meta Job metadata.
-     * @param locNodeId Local node ID.
-     * @throws IgniteCheckedException If failed.
-     */
-    private void processJobMetaUpdate(HadoopJobId jobId, HadoopJobMetadata meta, UUID locNodeId)
-        throws IgniteCheckedException {
-        JobLocalState state = activeJobs.get(jobId);
-
-        HadoopJob job = job(jobId, meta.jobInfo());
-
-        HadoopMapReducePlan plan = meta.mapReducePlan();
-
-        switch (meta.phase()) {
-            case PHASE_SETUP: {
-                if (ctx.jobUpdateLeader()) {
-                    Collection<HadoopTaskInfo> setupTask = setupTask(jobId);
-
-                    if (setupTask != null)
-                        ctx.taskExecutor().run(job, setupTask);
-                }
-
-                break;
-            }
-
-            case PHASE_MAP: {
-                // Check if we should initiate new task on local node.
-                Collection<HadoopTaskInfo> tasks = mapperTasks(plan.mappers(locNodeId), meta);
-
-                if (tasks != null)
-                    ctx.taskExecutor().run(job, tasks);
-
-                break;
-            }
-
-            case PHASE_REDUCE: {
-                if (meta.pendingReducers().isEmpty() && ctx.jobUpdateLeader()) {
-                    HadoopTaskInfo info = new HadoopTaskInfo(COMMIT, jobId, 0, 0, null);
-
-                    if (log.isDebugEnabled())
-                        log.debug("Submitting COMMIT task for execution [locNodeId=" + locNodeId +
-                                ", jobId=" + jobId + ']');
-
-                    ctx.taskExecutor().run(job, Collections.singletonList(info));
-
-                    break;
-                }
-
-                Collection<HadoopTaskInfo> tasks = reducerTasks(plan.reducers(locNodeId), job);
-
-                if (tasks != null)
-                    ctx.taskExecutor().run(job, tasks);
-
-                break;
-            }
-
-            case PHASE_CANCELLING: {
-                // Prevent multiple task executor notifications.
-                if (state != null && state.onCancel()) {
-                    if (log.isDebugEnabled())
-                        log.debug("Cancelling local task execution for job: " + meta);
-
-                    ctx.taskExecutor().cancelTasks(jobId);
-                }
-
-                if (meta.pendingSplits().isEmpty() && meta.pendingReducers().isEmpty()) {
-                    if (ctx.jobUpdateLeader()) {
-                        if (state == null)
-                            state = initState(jobId);
-
-                        // Prevent running multiple abort tasks.
-                        if (state.onAborted()) {
-                            HadoopTaskInfo info = new HadoopTaskInfo(ABORT, jobId, 0, 0, null);
-
-                            if (log.isDebugEnabled())
-                                log.debug("Submitting ABORT task for execution [locNodeId=" + locNodeId +
-                                        ", jobId=" + jobId + ']');
-
-                            ctx.taskExecutor().run(job, Collections.singletonList(info));
-                        }
-                    }
-
-                    break;
-                }
-                else {
-                    // Check if there are unscheduled mappers or reducers.
-                    Collection<HadoopInputSplit> cancelMappers = new ArrayList<>();
-                    Collection<Integer> cancelReducers = new ArrayList<>();
-
-                    Collection<HadoopInputSplit> mappers = plan.mappers(ctx.localNodeId());
-
-                    if (mappers != null) {
-                        for (HadoopInputSplit b : mappers) {
-                            if (state == null || !state.mapperScheduled(b))
-                                cancelMappers.add(b);
-                        }
-                    }
-
-                    int[] rdc = plan.reducers(ctx.localNodeId());
-
-                    if (rdc != null) {
-                        for (int r : rdc) {
-                            if (state == null || !state.reducerScheduled(r))
-                                cancelReducers.add(r);
-                        }
-                    }
-
-                    if (!cancelMappers.isEmpty() || !cancelReducers.isEmpty())
-                        transform(jobId, new CancelJobProcessor(null, cancelMappers, cancelReducers));
-                }
-
-                break;
-            }
-
-            case PHASE_COMPLETE: {
-                if (log.isDebugEnabled())
-                    log.debug("Job execution is complete, will remove local state from active jobs " +
-                        "[jobId=" + jobId + ", meta=" + meta + ']');
-
-                if (state != null) {
-                    state = activeJobs.remove(jobId);
-
-                    assert state != null;
-
-                    ctx.shuffle().jobFinished(jobId);
-                }
-
-                GridFutureAdapter<HadoopJobId> finishFut = activeFinishFuts.remove(jobId);
-
-                if (finishFut != null) {
-                    if (log.isDebugEnabled())
-                        log.debug("Completing job future [locNodeId=" + locNodeId + ", meta=" + meta + ']');
-
-                    finishFut.onDone(jobId, meta.failCause());
-                }
-
-                assert job != null;
-
-                if (ctx.jobUpdateLeader())
-                    job.cleanupStagingDirectory();
-
-                jobs.remove(jobId);
-
-                if (ctx.jobUpdateLeader()) {
-                    ClassLoader ldr = job.getClass().getClassLoader();
-
-                    try {
-                        String statWriterClsName = job.info().property(HadoopUtils.JOB_COUNTER_WRITER_PROPERTY);
-
-                        if (statWriterClsName != null) {
-                            Class<?> cls = ldr.loadClass(statWriterClsName);
-
-                            HadoopCounterWriter writer = (HadoopCounterWriter)cls.newInstance();
-
-                            HadoopCounters cntrs = meta.counters();
-
-                            writer.write(job, cntrs);
-                        }
-                    }
-                    catch (Exception e) {
-                        log.error("Can't write statistic due to: ", e);
-                    }
-                }
-
-                job.dispose(false);
-
-                break;
-            }
-
-            default:
-                throw new IllegalStateException("Unknown phase: " + meta.phase());
-        }
-    }
-
-    /**
-     * Creates setup task based on job information.
-     *
-     * @param jobId Job ID.
-     * @return Setup task wrapped in collection.
-     */
-    @Nullable private Collection<HadoopTaskInfo> setupTask(HadoopJobId jobId) {
-        if (activeJobs.containsKey(jobId))
-            return null;
-        else {
-            initState(jobId);
-
-            return Collections.singleton(new HadoopTaskInfo(SETUP, jobId, 0, 0, null));
-        }
-    }
-
-    /**
-     * Creates mapper tasks based on job information.
-     *
-     * @param mappers Mapper input splits.
-     * @param meta Job metadata.
-     * @return Collection of created task infos or {@code null} if no mapper tasks are scheduled for the local node.
-     */
-    private Collection<HadoopTaskInfo> mapperTasks(Iterable<HadoopInputSplit> mappers, HadoopJobMetadata meta) {
-        UUID locNodeId = ctx.localNodeId();
-        HadoopJobId jobId = meta.jobId();
-
-        JobLocalState state = activeJobs.get(jobId);
-
-        Collection<HadoopTaskInfo> tasks = null;
-
-        if (mappers != null) {
-            if (state == null)
-                state = initState(jobId);
-
-            for (HadoopInputSplit split : mappers) {
-                if (state.addMapper(split)) {
-                    if (log.isDebugEnabled())
-                        log.debug("Submitting MAP task for execution [locNodeId=" + locNodeId +
-                            ", split=" + split + ']');
-
-                    HadoopTaskInfo taskInfo = new HadoopTaskInfo(MAP, jobId, meta.taskNumber(split), 0, split);
-
-                    if (tasks == null)
-                        tasks = new ArrayList<>();
-
-                    tasks.add(taskInfo);
-                }
-            }
-        }
-
-        return tasks;
-    }
-
-    /**
-     * Creates reducer tasks based on job information.
-     *
-     * @param reducers Reducers (may be {@code null}).
-     * @param job Job instance.
-     * @return Collection of task infos.
-     */
-    private Collection<HadoopTaskInfo> reducerTasks(int[] reducers, HadoopJob job) {
-        UUID locNodeId = ctx.localNodeId();
-        HadoopJobId jobId = job.id();
-
-        JobLocalState state = activeJobs.get(jobId);
-
-        Collection<HadoopTaskInfo> tasks = null;
-
-        if (reducers != null) {
-            if (state == null)
-                state = initState(job.id());
-
-            for (int rdc : reducers) {
-                if (state.addReducer(rdc)) {
-                    if (log.isDebugEnabled())
-                        log.debug("Submitting REDUCE task for execution [locNodeId=" + locNodeId +
-                            ", rdc=" + rdc + ']');
-
-                    HadoopTaskInfo taskInfo = new HadoopTaskInfo(REDUCE, jobId, rdc, 0, null);
-
-                    if (tasks == null)
-                        tasks = new ArrayList<>();
-
-                    tasks.add(taskInfo);
-                }
-            }
-        }
-
-        return tasks;
-    }
-
-    /**
-     * Initializes local state for given job metadata.
-     *
-     * @param jobId Job ID.
-     * @return Local state.
-     */
-    private JobLocalState initState(HadoopJobId jobId) {
-        return F.addIfAbsent(activeJobs, jobId, new JobLocalState());
-    }
-
-    /**
-     * Gets or creates job instance.
-     *
-     * @param jobId Job ID.
-     * @param jobInfo Job info.
-     * @return Job.
-     * @throws IgniteCheckedException If failed.
-     */
-    @Nullable public HadoopJob job(HadoopJobId jobId, @Nullable HadoopJobInfo jobInfo) throws IgniteCheckedException {
-        GridFutureAdapter<HadoopJob> fut = jobs.get(jobId);
-
-        if (fut != null || (fut = jobs.putIfAbsent(jobId, new GridFutureAdapter<HadoopJob>())) != null)
-            return fut.get();
-
-        fut = jobs.get(jobId);
-
-        HadoopJob job = null;
-
-        try {
-            if (jobInfo == null) {
-                HadoopJobMetadata meta = jobMetaCache().get(jobId);
-
-                if (meta == null)
-                    throw new IgniteCheckedException("Failed to find job metadata for ID: " + jobId);
-
-                jobInfo = meta.jobInfo();
-            }
-
-            job = jobInfo.createJob(jobCls, jobId, log, ctx.configuration().getNativeLibraryNames());
-
-            job.initialize(false, ctx.localNodeId());
-
-            fut.onDone(job);
-
-            return job;
-        }
-        catch (IgniteCheckedException e) {
-            fut.onDone(e);
-
-            jobs.remove(jobId, fut);
-
-            if (job != null) {
-                try {
-                    job.dispose(false);
-                }
-                catch (IgniteCheckedException e0) {
-                    U.error(log, "Failed to dispose job: " + jobId, e0);
-                }
-            }
-
-            throw e;
-        }
-    }
-
-    /**
-     * Kills job.
-     *
-     * @param jobId Job ID.
-     * @return {@code True} if job was killed.
-     * @throws IgniteCheckedException If failed.
-     */
-    public boolean killJob(HadoopJobId jobId) throws IgniteCheckedException {
-        if (!busyLock.tryReadLock())
-            return false; // Grid is stopping.
-
-        try {
-            HadoopJobMetadata meta = jobMetaCache().get(jobId);
-
-            if (meta != null && meta.phase() != PHASE_COMPLETE && meta.phase() != PHASE_CANCELLING) {
-                HadoopTaskCancelledException err = new HadoopTaskCancelledException("Job cancelled.");
-
-                jobMetaCache().invoke(jobId, new CancelJobProcessor(null, err));
-            }
-        }
-        finally {
-            busyLock.readUnlock();
-        }
-
-        IgniteInternalFuture<?> fut = finishFuture(jobId);
-
-        if (fut != null) {
-            try {
-                fut.get();
-            }
-            catch (Exception e) {
-                if (e.getCause() instanceof HadoopTaskCancelledException)
-                    return true;
-            }
-        }
-
-        return false;
-    }
-
-    /**
-     * Returns job counters.
-     *
-     * @param jobId Job identifier.
-     * @return Job counters or {@code null} if job cannot be found.
-     * @throws IgniteCheckedException If failed.
-     */
-    @Nullable public HadoopCounters jobCounters(HadoopJobId jobId) throws IgniteCheckedException {
-        if (!busyLock.tryReadLock())
-            return null;
-
-        try {
-            final HadoopJobMetadata meta = jobMetaCache().get(jobId);
-
-            return meta != null ? meta.counters() : null;
-        }
-        finally {
-            busyLock.readUnlock();
-        }
-    }
-
-    /**
-     * Event handler protected by busy lock.
-     */
-    private abstract class EventHandler implements Runnable {
-        /** {@inheritDoc} */
-        @Override public void run() {
-            if (!busyLock.tryReadLock())
-                return;
-
-            try {
-                body();
-            }
-            catch (Throwable e) {
-                U.error(log, "Unhandled exception while processing event.", e);
-
-                if (e instanceof Error)
-                    throw (Error)e;
-            }
-            finally {
-                busyLock.readUnlock();
-            }
-        }
-
-        /**
-         * Handler body.
-         */
-        protected abstract void body() throws Exception;
-    }
-
-    /**
-     * Local state of a job on the current node.
-     */
-    private class JobLocalState {
-        /** Mappers. */
-        private final Collection<HadoopInputSplit> currMappers = new HashSet<>();
-
-        /** Reducers. */
-        private final Collection<Integer> currReducers = new HashSet<>();
-
-        /** Number of completed mappers. */
-        private final AtomicInteger completedMappersCnt = new AtomicInteger();
-
-        /** Cancelled flag. */
-        private boolean cancelled;
-
-        /** Aborted flag. */
-        private boolean aborted;
-
-        /**
-         * @param mapSplit Map split to add.
-         * @return {@code True} if mapper was added.
-         */
-        private boolean addMapper(HadoopInputSplit mapSplit) {
-            return currMappers.add(mapSplit);
-        }
-
-        /**
-         * @param rdc Reducer number to add.
-         * @return {@code True} if reducer was added.
-         */
-        private boolean addReducer(int rdc) {
-            return currReducers.add(rdc);
-        }
-
-        /**
-         * Checks whether a mapper was scheduled for the given split.
-         *
-         * @param mapSplit Map split to check.
-         * @return {@code True} if mapper was scheduled.
-         */
-        public boolean mapperScheduled(HadoopInputSplit mapSplit) {
-            return currMappers.contains(mapSplit);
-        }
-
-        /**
-         * Checks whether the given reducer was scheduled.
-         *
-         * @param rdc Reducer number to check.
-         * @return {@code True} if reducer was scheduled.
-         */
-        public boolean reducerScheduled(int rdc) {
-            return currReducers.contains(rdc);
-        }
-
-        /**
-         * @param taskInfo Task info.
-         * @param status Task status.
-         * @param prev Previous closure.
-         */
-        private void onSetupFinished(final HadoopTaskInfo taskInfo, HadoopTaskStatus status, StackedProcessor prev) {
-            final HadoopJobId jobId = taskInfo.jobId();
-
-            if (status.state() == FAILED || status.state() == CRASHED)
-                transform(jobId, new CancelJobProcessor(prev, status.failCause()));
-            else
-                transform(jobId, new UpdatePhaseProcessor(prev, PHASE_MAP));
-        }
-
-        /**
-         * @param taskInfo Task info.
-         * @param status Task status.
-         * @param prev Previous closure.
-         */
-        private void onMapFinished(final HadoopTaskInfo taskInfo, HadoopTaskStatus status,
-            final StackedProcessor prev) {
-            final HadoopJobId jobId = taskInfo.jobId();
-
-            boolean lastMapperFinished = completedMappersCnt.incrementAndGet() == currMappers.size();
-
-            if (status.state() == FAILED || status.state() == CRASHED) {
-                // Fail the whole job.
-                transform(jobId, new RemoveMappersProcessor(prev, taskInfo.inputSplit(), status.failCause()));
-
-                return;
-            }
-
-            IgniteInClosure<IgniteInternalFuture<?>> cacheUpdater = new CIX1<IgniteInternalFuture<?>>() {
-                @Override public void applyx(IgniteInternalFuture<?> f) {
-                    Throwable err = null;
-
-                    if (f != null) {
-                        try {
-                            f.get();
-                        }
-                        catch (IgniteCheckedException e) {
-                            err = e;
-                        }
-                    }
-
-                    transform(jobId, new RemoveMappersProcessor(prev, taskInfo.inputSplit(), err));
-                }
-            };
-
-            if (lastMapperFinished)
-                ctx.shuffle().flush(jobId).listen(cacheUpdater);
-            else
-                cacheUpdater.apply(null);
-        }
-
-        /**
-         * @param taskInfo Task info.
-         * @param status Task status.
-         * @param prev Previous closure.
-         */
-        private void onReduceFinished(HadoopTaskInfo taskInfo, HadoopTaskStatus status, StackedProcessor prev) {
-            HadoopJobId jobId = taskInfo.jobId();
-
-            if (status.state() == FAILED || status.state() == CRASHED)
-                // Fail the whole job.
-                transform(jobId, new RemoveReducerProcessor(prev, taskInfo.taskNumber(), status.failCause()));
-            else
-                transform(jobId, new RemoveReducerProcessor(prev, taskInfo.taskNumber()));
-        }
-
-        /**
-         * @param taskInfo Task info.
-         * @param status Task status.
-         * @param prev Previous closure.
-         */
-        private void onCombineFinished(HadoopTaskInfo taskInfo, HadoopTaskStatus status,
-            final StackedProcessor prev) {
-            final HadoopJobId jobId = taskInfo.jobId();
-
-            if (status.state() == FAILED || status.state() == CRASHED)
-                // Fail the whole job.
-                transform(jobId, new RemoveMappersProcessor(prev, currMappers, status.failCause()));
-            else {
-                ctx.shuffle().flush(jobId).listen(new CIX1<IgniteInternalFuture<?>>() {
-                    @Override public void applyx(IgniteInternalFuture<?> f) {
-                        Throwable err = null;
-
-                        if (f != null) {
-                            try {
-                                f.get();
-                            }
-                            catch (IgniteCheckedException e) {
-                                err = e;
-                            }
-                        }
-
-                        transform(jobId, new RemoveMappersProcessor(prev, currMappers, err));
-                    }
-                });
-            }
-        }
-
-        /**
-         * @return {@code True} if job was cancelled by this (first) call.
-         */
-        public boolean onCancel() {
-            if (!cancelled && !aborted) {
-                cancelled = true;
-
-                return true;
-            }
-
-            return false;
-        }
-
-        /**
-         * @return {@code True} if job was aborted by this (first) call.
-         */
-        public boolean onAborted() {
-            if (!aborted) {
-                aborted = true;
-
-                return true;
-            }
-
-            return false;
-        }
-    }
-
-    /**
-     * Update job phase transform closure.
-     */
-    private static class UpdatePhaseProcessor extends StackedProcessor {
-        /** */
-        private static final long serialVersionUID = 0L;
-
-        /** Phase to update. */
-        private final HadoopJobPhase phase;
-
-        /**
-         * @param prev Previous closure.
-         * @param phase Phase to update.
-         */
-        private UpdatePhaseProcessor(@Nullable StackedProcessor prev, HadoopJobPhase phase) {
-            super(prev);
-
-            this.phase = phase;
-        }
-
-        /** {@inheritDoc} */
-        @Override protected void update(HadoopJobMetadata meta, HadoopJobMetadata cp) {
-            cp.phase(phase);
-        }
-    }
-
-    /**
-     * Remove mapper transform closure.
-     */
-    private static class RemoveMappersProcessor extends StackedProcessor {
-        /** */
-        private static final long serialVersionUID = 0L;
-
-        /** Mapper splits to remove. */
-        private final Collection<HadoopInputSplit> splits;
-
-        /** Error. */
-        private final Throwable err;
-
-        /**
-         * @param prev Previous closure.
-         * @param split Mapper split to remove.
-         * @param err Error.
-         */
-        private RemoveMappersProcessor(@Nullable StackedProcessor prev, HadoopInputSplit split, Throwable err) {
-            this(prev, Collections.singletonList(split), err);
-        }
-
-        /**
-         * @param prev Previous closure.
-         * @param splits Mapper splits to remove.
-         * @param err Error.
-         */
-        private RemoveMappersProcessor(@Nullable StackedProcessor prev, Collection<HadoopInputSplit> splits,
-            Throwable err) {
-            super(prev);
-
-            this.splits = splits;
-            this.err = err;
-        }
-
-        /** {@inheritDoc} */
-        @Override protected void update(HadoopJobMetadata meta, HadoopJobMetadata cp) {
-            Map<HadoopInputSplit, Integer> splitsCp = new HashMap<>(cp.pendingSplits());
-
-            for (HadoopInputSplit s : splits)
-                splitsCp.remove(s);
-
-            cp.pendingSplits(splitsCp);
-
-            if (cp.phase() != PHASE_CANCELLING && err != null)
-                cp.failCause(err);
-
-            if (err != null)
-                cp.phase(PHASE_CANCELLING);
-
-            if (splitsCp.isEmpty()) {
-                if (cp.phase() != PHASE_CANCELLING)
-                    cp.phase(PHASE_REDUCE);
-            }
-        }
-    }
-
-    /**
-     * Remove reducer transform closure.
-     */
-    private static class RemoveReducerProcessor extends StackedProcessor {
-        /** */
-        private static final long serialVersionUID = 0L;
-
-        /** Reducer to remove. */
-        private final int rdc;
-
-        /** Error. */
-        private Throwable err;
-
-        /**
-         * @param prev Previous closure.
-         * @param rdc Reducer to remove.
-         */
-        private RemoveReducerProcessor(@Nullable StackedProcessor prev, int rdc) {
-            super(prev);
-
-            this.rdc = rdc;
-        }
-
-        /**
-         * @param prev Previous closure.
-         * @param rdc Reducer to remove.
-         * @param err Error.
-         */
-        private RemoveReducerProcessor(@Nullable StackedProcessor prev, int rdc, Throwable err) {
-            super(prev);
-
-            this.rdc = rdc;
-            this.err = err;
-        }
-
-        /** {@inheritDoc} */
-        @Override protected void update(HadoopJobMetadata meta, HadoopJobMetadata cp) {
-            Collection<Integer> rdcCp = new HashSet<>(cp.pendingReducers());
-
-            rdcCp.remove(rdc);
-
-            cp.pendingReducers(rdcCp);
-
-            if (err != null) {
-                cp.phase(PHASE_CANCELLING);
-                cp.failCause(err);
-            }
-        }
-    }
-
-    /**
-     * Initialize reducers.
-     */
-    private static class InitializeReducersProcessor extends StackedProcessor {
-        /** */
-        private static final long serialVersionUID = 0L;
-
-        /** Reducers. */
-        private final Collection<Integer> rdc;
-
-        /** Process descriptor for reducers. */
-        private final HadoopProcessDescriptor desc;
-
-        /**
-         * @param prev Previous closure.
-         * @param rdc Reducers to initialize.
-         * @param desc External process descriptor.
-         */
-        private InitializeReducersProcessor(@Nullable StackedProcessor prev,
-            Collection<Integer> rdc,
-            HadoopProcessDescriptor desc) {
-            super(prev);
-
-            assert !F.isEmpty(rdc);
-            assert desc != null;
-
-            this.rdc = rdc;
-            this.desc = desc;
-        }
-
-        /** {@inheritDoc} */
-        @Override protected void update(HadoopJobMetadata meta, HadoopJobMetadata cp) {
-            Map<Integer, HadoopProcessDescriptor> oldMap = meta.reducersAddresses();
-
-            Map<Integer, HadoopProcessDescriptor> rdcMap = oldMap == null ?
-                new HashMap<Integer, HadoopProcessDescriptor>() : new HashMap<>(oldMap);
-
-            for (Integer r : rdc)
-                rdcMap.put(r, desc);
-
-            cp.reducersAddresses(rdcMap);
-        }
-    }
-
-    /**
-     * Cancel job transform closure.
-     */
-    private static class CancelJobProcessor extends StackedProcessor {
-        /** */
-        private static final long serialVersionUID = 0L;
-
-        /** Mapper splits to remove. */
-        private final Collection<HadoopInputSplit> splits;
-
-        /** Reducers to remove. */
-        private final Collection<Integer> rdc;
-
-        /** Error. */
-        private final Throwable err;
-
-        /**
-         * @param prev Previous closure.
-         * @param err Fail cause.
-         */
-        private CancelJobProcessor(@Nullable StackedProcessor prev, Throwable err) {
-            this(prev, err, null, null);
-        }
-
-        /**
-         * @param prev Previous closure.
-         * @param splits Splits to remove.
-         * @param rdc Reducers to remove.
-         */
-        private CancelJobProcessor(@Nullable StackedProcessor prev,
-            Collection<HadoopInputSplit> splits,
-            Collection<Integer> rdc) {
-            this(prev, null, splits, rdc);
-        }
-
-        /**
-         * @param prev Previous closure.
-         * @param err Error.
-         * @param splits Splits to remove.
-         * @param rdc Reducers to remove.
-         */
-        private CancelJobProcessor(@Nullable StackedProcessor prev,
-            Throwable err,
-            Collection<HadoopInputSplit> splits,
-            Collection<Integer> rdc) {
-            super(prev);
-
-            this.splits = splits;
-            this.rdc = rdc;
-            this.err = err;
-        }
-
-        /** {@inheritDoc} */
-        @Override protected void update(HadoopJobMetadata meta, HadoopJobMetadata cp) {
-            final HadoopJobPhase currPhase = meta.phase();
-
-            assert currPhase == PHASE_CANCELLING || currPhase == PHASE_COMPLETE
-                    || err != null: "Invalid phase for cancel: " + currPhase;
-
-            Collection<Integer> rdcCp = new HashSet<>(cp.pendingReducers());
-
-            if (rdc != null)
-                rdcCp.removeAll(rdc);
-
-            cp.pendingReducers(rdcCp);
-
-            Map<HadoopInputSplit, Integer> splitsCp = new HashMap<>(cp.pendingSplits());
-
-            if (splits != null) {
-                for (HadoopInputSplit s : splits)
-                    splitsCp.remove(s);
-            }
-
-            cp.pendingSplits(splitsCp);
-
-            if (currPhase != PHASE_COMPLETE && currPhase != PHASE_CANCELLING)
-                cp.phase(PHASE_CANCELLING);
-
-            if (err != null)
-                cp.failCause(err);
-        }
-    }
-
-    /**
-     * Increment counter values closure.
-     */
-    private static class IncrementCountersProcessor extends StackedProcessor {
-        /** */
-        private static final long serialVersionUID = 0L;
-
-        /** */
-        private final HadoopCounters counters;
-
-        /**
-         * @param prev Previous closure.
-         * @param counters Task counters to add into job counters.
-         */
-        private IncrementCountersProcessor(@Nullable StackedProcessor prev, HadoopCounters counters) {
-            super(prev);
-
-            assert counters != null;
-
-            this.counters = counters;
-        }
-
-        /** {@inheritDoc} */
-        @Override protected void update(HadoopJobMetadata meta, HadoopJobMetadata cp) {
-            HadoopCounters cntrs = new HadoopCountersImpl(cp.counters());
-
-            cntrs.merge(counters);
-
-            cp.counters(cntrs);
-        }
-    }
-
-    /**
-     * Abstract stacked closure.
-     */
-    private abstract static class StackedProcessor implements
-        EntryProcessor<HadoopJobId, HadoopJobMetadata, Void>, Serializable {
-        /** */
-        private static final long serialVersionUID = 0L;
-
-        /** */
-        private final StackedProcessor prev;
-
-        /**
-         * @param prev Previous closure.
-         */
-        private StackedProcessor(@Nullable StackedProcessor prev) {
-            this.prev = prev;
-        }
-
-        /** {@inheritDoc} */
-        @Override public Void process(MutableEntry<HadoopJobId, HadoopJobMetadata> e, Object... args) {
-            HadoopJobMetadata val = apply(e.getValue());
-
-            if (val != null)
-                e.setValue(val);
-            else
-                e.remove();
-
-            return null;
-        }
-
-        /**
-         * @param meta Old value.
-         * @return New value.
-         */
-        private HadoopJobMetadata apply(HadoopJobMetadata meta) {
-            if (meta == null)
-                return null;
-
-            HadoopJobMetadata cp = prev != null ? prev.apply(meta) : new HadoopJobMetadata(meta);
-
-            update(meta, cp);
-
-            return cp;
-        }
-
-        /**
-         * Update given job metadata object.
-         *
-         * @param meta Initial job metadata.
-         * @param cp Copy.
-         */
-        protected abstract void update(HadoopJobMetadata meta, HadoopJobMetadata cp);
-    }
-}

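Note on the jobtracker code above: StackedProcessor applies chained, copy-on-write updates to job metadata, so a single cache invoke can carry several stacked changes. Each processor first lets its predecessor produce the updated copy, then applies its own change to that copy. Below is a minimal, self-contained sketch of the pattern; the Meta value type and the SetPhase processor are hypothetical stand-ins for HadoopJobMetadata and the concrete processors, not Ignite classes.

import java.util.function.UnaryOperator;

/** Hypothetical metadata holder standing in for HadoopJobMetadata. */
class Meta {
    String phase;

    Meta(String phase) { this.phase = phase; }

    Meta(Meta other) { this.phase = other.phase; } // Copy constructor.
}

/** Chained copy-on-write update: the oldest processor in the stack runs first. */
abstract class Stacked implements UnaryOperator<Meta> {
    private final Stacked prev;

    Stacked(Stacked prev) { this.prev = prev; }

    @Override public final Meta apply(Meta meta) {
        if (meta == null)
            return null; // Nothing to update; the real processor removes the entry.

        // Predecessor produces the copy; the first processor copies the original.
        Meta cp = prev != null ? prev.apply(meta) : new Meta(meta);

        update(meta, cp);

        return cp;
    }

    /** Applies this processor's own change to the copy. */
    protected abstract void update(Meta old, Meta cp);
}

/** Example concrete processor: moves the job to a new phase. */
class SetPhase extends Stacked {
    private final String phase;

    SetPhase(Stacked prev, String phase) {
        super(prev);

        this.phase = phase;
    }

    @Override protected void update(Meta old, Meta cp) {
        cp.phase = phase;
    }
}

Chaining, e.g. new SetPhase(new SetPhase(null, "MAP"), "REDUCE"), applies both updates in order against a single copy of the original value.
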
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/message/HadoopMessage.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/message/HadoopMessage.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/message/HadoopMessage.java
deleted file mode 100644
index 0d7bd3a..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/message/HadoopMessage.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.message;
-
-import java.io.Externalizable;
-
-/**
- * Marker interface for all hadoop messages.
- */
-public interface HadoopMessage extends Externalizable {
-    // No-op.
-}
\ No newline at end of file

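HadoopMessage is only a marker: by extending Externalizable it forces every Hadoop message to carry explicit, hand-written serialization instead of default Java serialization. A hedged sketch of what an implementation would look like follows; ExampleAck and its field are invented for illustration and are not Ignite classes.

import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;

/** Hypothetical message type; a real message would implement HadoopMessage. */
public class ExampleAck implements Externalizable {
    /** Message counter being acknowledged. */
    private long msgId;

    /** Public no-arg constructor required by Externalizable. */
    public ExampleAck() {
        // No-op.
    }

    /**
     * @param msgId Message counter.
     */
    public ExampleAck(long msgId) {
        this.msgId = msgId;
    }

    /** {@inheritDoc} */
    @Override public void writeExternal(ObjectOutput out) throws IOException {
        out.writeLong(msgId);
    }

    /** {@inheritDoc} */
    @Override public void readExternal(ObjectInput in) throws IOException {
        msgId = in.readLong();
    }
}
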
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/planner/HadoopDefaultMapReducePlan.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/planner/HadoopDefaultMapReducePlan.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/planner/HadoopDefaultMapReducePlan.java
index 15c62c8..7aaf3fa 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/planner/HadoopDefaultMapReducePlan.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/planner/HadoopDefaultMapReducePlan.java
@@ -17,13 +17,14 @@
 
 package org.apache.ignite.internal.processors.hadoop.planner;
 
-import java.util.Collection;
-import java.util.Map;
-import java.util.UUID;
 import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
 import org.apache.ignite.internal.processors.hadoop.HadoopMapReducePlan;
 import org.jetbrains.annotations.Nullable;
 
+import java.util.Collection;
+import java.util.Map;
+import java.util.UUID;
+
 /**
  * Map-reduce plan.
  */

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopClientProtocol.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopClientProtocol.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopClientProtocol.java
deleted file mode 100644
index 5f96e08..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopClientProtocol.java
+++ /dev/null
@@ -1,349 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.proto;
-
-import java.io.IOException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ipc.ProtocolSignature;
-import org.apache.hadoop.mapreduce.Cluster;
-import org.apache.hadoop.mapreduce.ClusterMetrics;
-import org.apache.hadoop.mapreduce.Counters;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.mapreduce.JobStatus;
-import org.apache.hadoop.mapreduce.QueueAclsInfo;
-import org.apache.hadoop.mapreduce.QueueInfo;
-import org.apache.hadoop.mapreduce.TaskAttemptID;
-import org.apache.hadoop.mapreduce.TaskCompletionEvent;
-import org.apache.hadoop.mapreduce.TaskReport;
-import org.apache.hadoop.mapreduce.TaskTrackerInfo;
-import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
-import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.mapreduce.v2.LogParams;
-import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AccessControlList;
-import org.apache.hadoop.security.token.Token;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.client.GridClient;
-import org.apache.ignite.internal.client.GridClientException;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobProperty;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobStatus;
-import org.apache.ignite.internal.processors.hadoop.HadoopMapReduceCounters;
-import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.JOB_SUBMISSION_START_TS_PROPERTY;
-import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.REQ_NEW_JOBID_TS_PROPERTY;
-import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.RESPONSE_NEW_JOBID_TS_PROPERTY;
-import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.createJobInfo;
-
-/**
- * Hadoop client protocol.
- */
-public class HadoopClientProtocol implements ClientProtocol {
-    /** Protocol version. */
-    private static final long PROTO_VER = 1L;
-
-    /** Default Ignite system directory. */
-    private static final String SYS_DIR = ".ignite/system";
-
-    /** Configuration. */
-    private final Configuration conf;
-
-    /** Ignite client. */
-    private volatile GridClient cli;
-
-    /** Last received version. */
-    private long lastVer = -1;
-
-    /** Last received status. */
-    private HadoopJobStatus lastStatus;
-
-    /**
-     * Constructor.
-     *
-     * @param conf Configuration.
-     * @param cli Ignite client.
-     */
-    public HadoopClientProtocol(Configuration conf, GridClient cli) {
-        assert cli != null;
-
-        this.conf = conf;
-        this.cli = cli;
-    }
-
-    /** {@inheritDoc} */
-    @Override public JobID getNewJobID() throws IOException, InterruptedException {
-        try {
-            conf.setLong(REQ_NEW_JOBID_TS_PROPERTY, U.currentTimeMillis());
-
-            HadoopJobId jobID = cli.compute().execute(HadoopProtocolNextTaskIdTask.class.getName(), null);
-
-            conf.setLong(RESPONSE_NEW_JOBID_TS_PROPERTY, U.currentTimeMillis());
-
-            return new JobID(jobID.globalId().toString(), jobID.localId());
-        }
-        catch (GridClientException e) {
-            throw new IOException("Failed to get new job ID.", e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public JobStatus submitJob(JobID jobId, String jobSubmitDir, Credentials ts) throws IOException,
-        InterruptedException {
-        try {
-            conf.setLong(JOB_SUBMISSION_START_TS_PROPERTY, U.currentTimeMillis());
-
-            HadoopJobStatus status = cli.compute().execute(HadoopProtocolSubmitJobTask.class.getName(),
-                new HadoopProtocolTaskArguments(jobId.getJtIdentifier(), jobId.getId(), createJobInfo(conf)));
-
-            if (status == null)
-                throw new IOException("Failed to submit job (null status obtained): " + jobId);
-
-            return processStatus(status);
-        }
-        catch (GridClientException | IgniteCheckedException e) {
-            throw new IOException("Failed to submit job.", e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public ClusterMetrics getClusterMetrics() throws IOException, InterruptedException {
-        return new ClusterMetrics(0, 0, 0, 0, 0, 0, 1000, 1000, 1, 100, 0, 0);
-    }
-
-    /** {@inheritDoc} */
-    @Override public Cluster.JobTrackerStatus getJobTrackerStatus() throws IOException, InterruptedException {
-        return Cluster.JobTrackerStatus.RUNNING;
-    }
-
-    /** {@inheritDoc} */
-    @Override public long getTaskTrackerExpiryInterval() throws IOException, InterruptedException {
-        return 0;
-    }
-
-    /** {@inheritDoc} */
-    @Override public AccessControlList getQueueAdmins(String queueName) throws IOException {
-        return new AccessControlList("*");
-    }
-
-    /** {@inheritDoc} */
-    @Override public void killJob(JobID jobId) throws IOException, InterruptedException {
-        try {
-            cli.compute().execute(HadoopProtocolKillJobTask.class.getName(),
-                new HadoopProtocolTaskArguments(jobId.getJtIdentifier(), jobId.getId()));
-        }
-        catch (GridClientException e) {
-            throw new IOException("Failed to kill job: " + jobId, e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void setJobPriority(JobID jobid, String priority) throws IOException, InterruptedException {
-        // No-op.
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean killTask(TaskAttemptID taskId, boolean shouldFail) throws IOException,
-        InterruptedException {
-        return false;
-    }
-
-    /** {@inheritDoc} */
-    @Override public JobStatus getJobStatus(JobID jobId) throws IOException, InterruptedException {
-        try {
-            Long delay = conf.getLong(HadoopJobProperty.JOB_STATUS_POLL_DELAY.propertyName(), -1);
-
-            HadoopProtocolTaskArguments args = delay >= 0 ?
-                new HadoopProtocolTaskArguments(jobId.getJtIdentifier(), jobId.getId(), delay) :
-                new HadoopProtocolTaskArguments(jobId.getJtIdentifier(), jobId.getId());
-
-            HadoopJobStatus status = cli.compute().execute(HadoopProtocolJobStatusTask.class.getName(), args);
-
-            if (status == null)
-                throw new IOException("Job tracker doesn't have any information about the job: " + jobId);
-
-            return processStatus(status);
-        }
-        catch (GridClientException e) {
-            throw new IOException("Failed to get job status: " + jobId, e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public Counters getJobCounters(JobID jobId) throws IOException, InterruptedException {
-        try {
-            final HadoopCounters counters = cli.compute().execute(HadoopProtocolJobCountersTask.class.getName(),
-                new HadoopProtocolTaskArguments(jobId.getJtIdentifier(), jobId.getId()));
-
-            if (counters == null)
-                throw new IOException("Job tracker doesn't have any information about the job: " + jobId);
-
-            return new HadoopMapReduceCounters(counters);
-        }
-        catch (GridClientException e) {
-            throw new IOException("Failed to get job counters: " + jobId, e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public TaskReport[] getTaskReports(JobID jobid, TaskType type) throws IOException, InterruptedException {
-        return new TaskReport[0];
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getFilesystemName() throws IOException, InterruptedException {
-        return FileSystem.get(conf).getUri().toString();
-    }
-
-    /** {@inheritDoc} */
-    @Override public JobStatus[] getAllJobs() throws IOException, InterruptedException {
-        return new JobStatus[0];
-    }
-
-    /** {@inheritDoc} */
-    @Override public TaskCompletionEvent[] getTaskCompletionEvents(JobID jobid, int fromEventId, int maxEvents)
-        throws IOException, InterruptedException {
-        return new TaskCompletionEvent[0];
-    }
-
-    /** {@inheritDoc} */
-    @Override public String[] getTaskDiagnostics(TaskAttemptID taskId) throws IOException, InterruptedException {
-        return new String[0];
-    }
-
-    /** {@inheritDoc} */
-    @Override public TaskTrackerInfo[] getActiveTrackers() throws IOException, InterruptedException {
-        return new TaskTrackerInfo[0];
-    }
-
-    /** {@inheritDoc} */
-    @Override public TaskTrackerInfo[] getBlacklistedTrackers() throws IOException, InterruptedException {
-        return new TaskTrackerInfo[0];
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getSystemDir() throws IOException, InterruptedException {
-        Path sysDir = new Path(SYS_DIR);
-
-        return sysDir.toString();
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getStagingAreaDir() throws IOException, InterruptedException {
-        String usr = UserGroupInformation.getCurrentUser().getShortUserName();
-
-        return HadoopUtils.stagingAreaDir(conf, usr).toString();
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getJobHistoryDir() throws IOException, InterruptedException {
-        return JobHistoryUtils.getConfiguredHistoryServerDoneDirPrefix(conf);
-    }
-
-    /** {@inheritDoc} */
-    @Override public QueueInfo[] getQueues() throws IOException, InterruptedException {
-        return new QueueInfo[0];
-    }
-
-    /** {@inheritDoc} */
-    @Override public QueueInfo getQueue(String queueName) throws IOException, InterruptedException {
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException, InterruptedException {
-        return new QueueAclsInfo[0];
-    }
-
-    /** {@inheritDoc} */
-    @Override public QueueInfo[] getRootQueues() throws IOException, InterruptedException {
-        return new QueueInfo[0];
-    }
-
-    /** {@inheritDoc} */
-    @Override public QueueInfo[] getChildQueues(String queueName) throws IOException, InterruptedException {
-        return new QueueInfo[0];
-    }
-
-    /** {@inheritDoc} */
-    @Override public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer) throws IOException,
-        InterruptedException {
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public long renewDelegationToken(Token<DelegationTokenIdentifier> token) throws IOException,
-        InterruptedException {
-        return 0;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void cancelDelegationToken(Token<DelegationTokenIdentifier> token) throws IOException,
-        InterruptedException {
-        // No-op.
-    }
-
-    /** {@inheritDoc} */
-    @Override public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID) throws IOException,
-        InterruptedException {
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public long getProtocolVersion(String protocol, long clientVersion) throws IOException {
-        return PROTO_VER;
-    }
-
-    /** {@inheritDoc} */
-    @Override public ProtocolSignature getProtocolSignature(String protocol, long clientVersion, int clientMethodsHash)
-        throws IOException {
-        return ProtocolSignature.getProtocolSignature(this, protocol, clientVersion, clientMethodsHash);
-    }
-
-    /**
-     * Process received status update.
-     *
-     * @param status Ignite status.
-     * @return Hadoop status.
-     */
-    private JobStatus processStatus(HadoopJobStatus status) {
-        // IMPORTANT! This method only works in a single-threaded environment. That holds at the moment because
-        // IgniteHadoopClientProtocolProvider creates a new instance of this class for every new job, and the Job
-        // class serializes invocations of the submitJob() and getJobStatus() methods. However, if either condition
-        // changes in the future (the protocol starts serving statuses for several jobs, or status updates are no
-        // longer serialized), we will have to fall back to a concurrent approach (e.g. using a ConcurrentHashMap).
-        // (vozerov)
-        if (lastVer < status.version()) {
-            lastVer = status.version();
-
-            lastStatus = status;
-        }
-        else
-            assert lastStatus != null;
-
-        return HadoopUtils.status(lastStatus, conf);
-    }
-}
\ No newline at end of file

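processStatus() above relies on a monotonic version counter: a response is applied only if it is newer than the last one seen, so a stale status can never overwrite a fresh one. A simplified, single-threaded sketch of that guard follows; Status is a stand-in for HadoopJobStatus, and a concurrent variant would need synchronization or an AtomicReference with compare-and-set.

/** Stand-in for HadoopJobStatus: only the version matters for the guard. */
interface Status {
    long version();
}

/** Keeps the freshest status seen so far; stale responses are ignored. */
class StatusCache {
    /** Highest version observed. */
    private long lastVer = -1;

    /** Status corresponding to the highest version. */
    private Status lastStatus;

    Status update(Status status) {
        if (lastVer < status.version()) { // Newer than anything seen so far.
            lastVer = status.version();

            lastStatus = status;
        }
        // Else: keep the cached value; a newer status was already recorded.

        return lastStatus;
    }
}
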
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobCountersTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobCountersTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobCountersTask.java
deleted file mode 100644
index 8f0271c..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobCountersTask.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.proto;
-
-import java.util.UUID;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.compute.ComputeJobContext;
-import org.apache.ignite.internal.processors.hadoop.Hadoop;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
-
-/**
- * Task to get job counters.
- */
-public class HadoopProtocolJobCountersTask extends HadoopProtocolTaskAdapter<HadoopCounters> {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** {@inheritDoc} */
-    @Override public HadoopCounters run(ComputeJobContext jobCtx, Hadoop hadoop,
-        HadoopProtocolTaskArguments args) throws IgniteCheckedException {
-
-        UUID nodeId = UUID.fromString(args.<String>get(0));
-        Integer id = args.get(1);
-
-        assert nodeId != null;
-        assert id != null;
-
-        return hadoop.counters(new HadoopJobId(nodeId, id));
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobStatusTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobStatusTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobStatusTask.java
deleted file mode 100644
index c08fe77..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobStatusTask.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.proto;
-
-import java.util.UUID;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.compute.ComputeJobContext;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.processors.hadoop.Hadoop;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobStatus;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.lang.IgniteInClosure;
-
-/**
- * Job status task.
- */
-public class HadoopProtocolJobStatusTask extends HadoopProtocolTaskAdapter<HadoopJobStatus> {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** Default poll delay. */
-    private static final long DFLT_POLL_DELAY = 100L;
-
-    /** Attribute for held status. */
-    private static final String ATTR_HELD = "held";
-
-    /** {@inheritDoc} */
-    @Override public HadoopJobStatus run(final ComputeJobContext jobCtx, Hadoop hadoop,
-        HadoopProtocolTaskArguments args) throws IgniteCheckedException {
-        UUID nodeId = UUID.fromString(args.<String>get(0));
-        Integer id = args.get(1);
-        Long pollDelay = args.get(2);
-
-        assert nodeId != null;
-        assert id != null;
-
-        HadoopJobId jobId = new HadoopJobId(nodeId, id);
-
-        if (pollDelay == null)
-            pollDelay = DFLT_POLL_DELAY;
-
-        if (pollDelay > 0) {
-            IgniteInternalFuture<?> fut = hadoop.finishFuture(jobId);
-
-            if (fut != null) {
-                if (fut.isDone() || F.eq(jobCtx.getAttribute(ATTR_HELD), true))
-                    return hadoop.status(jobId);
-                else {
-                    fut.listen(new IgniteInClosure<IgniteInternalFuture<?>>() {
-                        @Override public void apply(IgniteInternalFuture<?> fut0) {
-                            jobCtx.callcc();
-                        }
-                    });
-
-                    jobCtx.setAttribute(ATTR_HELD, true);
-
-                    return jobCtx.holdcc(pollDelay);
-                }
-            }
-            else
-                return null;
-        }
-        else
-            return hadoop.status(jobId);
-    }
-}
\ No newline at end of file

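The task above uses Ignite's continuation support: if the job is still running, it registers a listener, suspends itself with holdcc(), and is re-invoked via callcc() when the finish future completes, so no thread blocks while polling. A simplified sketch of that control flow, with CompletableFuture standing in for IgniteInternalFuture and a Runnable standing in for jobCtx.callcc(); these stand-ins are assumptions for illustration only.

import java.util.concurrent.CompletableFuture;

/** Simplified poll-or-suspend logic; types are stand-ins, not Ignite APIs. */
final class StatusPoller {
    /** Mirrors the ATTR_HELD job context attribute. */
    private boolean held;

    /**
     * @param finishFut Job finish future.
     * @param resume Callback that re-invokes the suspended task.
     * @return Status if available, {@code null} if the task suspended itself.
     */
    String poll(CompletableFuture<Void> finishFut, Runnable resume) {
        if (finishFut.isDone() || held)
            return "STATUS"; // Job finished, or we already waited once: answer now.

        finishFut.whenComplete((res, err) -> resume.run()); // Resume later.

        held = true;

        return null; // In Ignite this would be jobCtx.holdcc(pollDelay).
    }
}
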
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolKillJobTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolKillJobTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolKillJobTask.java
deleted file mode 100644
index 0f65664..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolKillJobTask.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.proto;
-
-import java.util.UUID;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.compute.ComputeJobContext;
-import org.apache.ignite.internal.processors.hadoop.Hadoop;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-
-/**
- * Kill job task.
- */
-public class HadoopProtocolKillJobTask extends HadoopProtocolTaskAdapter<Boolean> {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** {@inheritDoc} */
-    @Override public Boolean run(ComputeJobContext jobCtx, Hadoop hadoop,
-        HadoopProtocolTaskArguments args) throws IgniteCheckedException {
-        UUID nodeId = UUID.fromString(args.<String>get(0));
-        Integer id = args.get(1);
-
-        assert nodeId != null;
-        assert id != null;
-
-        HadoopJobId jobId = new HadoopJobId(nodeId, id);
-
-        return hadoop.kill(jobId);
-    }
-}
\ No newline at end of file

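Together with killJob() in the job tracker (removed earlier in this commit), this task forms a two-step handshake: flip the job metadata into the cancelling phase, then wait on the finish future and inspect its failure cause to learn whether the job was actually killed. A condensed sketch of the waiting side, under the assumption that CancellationException stands in for HadoopTaskCancelledException and CompletableFuture for the internal future type:

import java.util.concurrent.CancellationException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

/** Simplified "wait and classify" step of the kill handshake. */
final class KillWaiter {
    /**
     * @param finishFut Job finish future.
     * @return {@code true} if the job terminated because it was cancelled.
     */
    boolean awaitKill(CompletableFuture<Void> finishFut) {
        try {
            finishFut.get(); // Wait for the job to finish one way or another.
        }
        catch (ExecutionException e) {
            // Cancellation surfaces as the failure cause of the finish future.
            return e.getCause() instanceof CancellationException;
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }

        return false; // Completed normally: nothing was killed.
    }
}
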
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolNextTaskIdTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolNextTaskIdTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolNextTaskIdTask.java
deleted file mode 100644
index bde7821..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolNextTaskIdTask.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.proto;
-
-import org.apache.ignite.compute.ComputeJobContext;
-import org.apache.ignite.internal.processors.hadoop.Hadoop;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-
-/**
- * Task to get the next job ID.
- */
-public class HadoopProtocolNextTaskIdTask extends HadoopProtocolTaskAdapter<HadoopJobId> {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** {@inheritDoc} */
-    @Override public HadoopJobId run(ComputeJobContext jobCtx, Hadoop hadoop,
-        HadoopProtocolTaskArguments args) {
-        return hadoop.nextJobId();
-    }
-}
\ No newline at end of file
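
Taken together, these two protocol tasks cover the ID-allocation and kill steps of the client flow. A sketch of driving the same Hadoop facade directly, assuming a configured org.apache.hadoop.mapreduce.Job named job and the grid()/createJobInfo() test helpers used in the tests later in this commit:

    Hadoop hadoop = grid(0).hadoop();

    // Allocate the next job ID, submit a job under it, then request termination.
    HadoopJobId jobId = hadoop.nextJobId();

    hadoop.submit(jobId, createJobInfo(job.getConfiguration()));

    boolean killed = hadoop.kill(jobId);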


[02/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultMapReducePlannerSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultMapReducePlannerSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultMapReducePlannerSelfTest.java
deleted file mode 100644
index a69b72a..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultMapReducePlannerSelfTest.java
+++ /dev/null
@@ -1,615 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteFileSystem;
-import org.apache.ignite.cluster.ClusterNode;
-import org.apache.ignite.hadoop.mapreduce.IgniteHadoopMapReducePlanner;
-import org.apache.ignite.igfs.IgfsBlockLocation;
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.internal.processors.hadoop.planner.HadoopAbstractMapReducePlanner;
-import org.apache.ignite.internal.processors.igfs.IgfsBlockLocationImpl;
-import org.apache.ignite.internal.processors.igfs.IgfsIgniteMock;
-import org.apache.ignite.internal.processors.igfs.IgfsMock;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.testframework.GridTestNode;
-import org.apache.ignite.testframework.GridTestUtils;
-
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.UUID;
-
-/**
- * Self test for the default map-reduce planner.
- */
-public class HadoopDefaultMapReducePlannerSelfTest extends HadoopAbstractSelfTest {
-    /** */
-    private static final UUID ID_1 = new UUID(0, 1);
-
-    /** */
-    private static final UUID ID_2 = new UUID(0, 2);
-
-    /** */
-    private static final UUID ID_3 = new UUID(0, 3);
-
-    /** */
-    private static final String HOST_1 = "host1";
-
-    /** */
-    private static final String HOST_2 = "host2";
-
-    /** */
-    private static final String HOST_3 = "host3";
-
-    /** */
-    private static final String INVALID_HOST_1 = "invalid_host1";
-
-    /** */
-    private static final String INVALID_HOST_2 = "invalid_host2";
-
-    /** */
-    private static final String INVALID_HOST_3 = "invalid_host3";
-
-    /** Mocked IGFS. */
-    private static final IgniteFileSystem IGFS = new MockIgfs();
-
-    /** Mocked Grid. */
-    private static final IgfsIgniteMock GRID = new IgfsIgniteMock(null, IGFS);
-
-    /** Planner. */
-    private static final HadoopMapReducePlanner PLANNER = new IgniteHadoopMapReducePlanner();
-
-    /** Block locations. */
-    private static final Map<Block, Collection<IgfsBlockLocation>> BLOCK_MAP = new HashMap<>();
-
-    /** Proxy map. */
-    private static final Map<URI, Boolean> PROXY_MAP = new HashMap<>();
-
-    /** Last created plan. */
-    private static final ThreadLocal<HadoopMapReducePlan> PLAN = new ThreadLocal<>();
-
-    /**
-     * Static initializer.
-     */
-    static {
-        GridTestUtils.setFieldValue(PLANNER, HadoopAbstractMapReducePlanner.class, "ignite", GRID);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTest() throws Exception {
-        GridTestUtils.setFieldValue(PLANNER, HadoopAbstractMapReducePlanner.class, "log", log());
-
-        BLOCK_MAP.clear();
-        PROXY_MAP.clear();
-    }
-
-    /**
-     * @throws IgniteCheckedException If failed.
-     */
-    public void testIgfsOneBlockPerNode() throws IgniteCheckedException {
-        HadoopFileBlock split1 = split(true, "/file1", 0, 100, HOST_1);
-        HadoopFileBlock split2 = split(true, "/file2", 0, 100, HOST_2);
-        HadoopFileBlock split3 = split(true, "/file3", 0, 100, HOST_3);
-
-        mapIgfsBlock(split1.file(), 0, 100, location(0, 100, ID_1));
-        mapIgfsBlock(split2.file(), 0, 100, location(0, 100, ID_2));
-        mapIgfsBlock(split3.file(), 0, 100, location(0, 100, ID_3));
-
-        plan(1, split1);
-        assert ensureMappers(ID_1, split1);
-        assert ensureReducers(ID_1, 1);
-        assert ensureEmpty(ID_2);
-        assert ensureEmpty(ID_3);
-
-        plan(2, split1);
-        assert ensureMappers(ID_1, split1);
-        assert ensureReducers(ID_1, 2);
-        assert ensureEmpty(ID_2);
-        assert ensureEmpty(ID_3);
-
-        plan(1, split1, split2);
-        assert ensureMappers(ID_1, split1);
-        assert ensureMappers(ID_2, split2);
-        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 0) || ensureReducers(ID_1, 0) && ensureReducers(ID_2, 1);
-        assert ensureEmpty(ID_3);
-
-        plan(2, split1, split2);
-        assert ensureMappers(ID_1, split1);
-        assert ensureMappers(ID_2, split2);
-        assert ensureReducers(ID_1, 1);
-        assert ensureReducers(ID_2, 1);
-        assert ensureEmpty(ID_3);
-
-        plan(3, split1, split2);
-        assert ensureMappers(ID_1, split1);
-        assert ensureMappers(ID_2, split2);
-        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 2) || ensureReducers(ID_1, 2) && ensureReducers(ID_2, 1);
-        assert ensureEmpty(ID_3);
-
-        plan(3, split1, split2, split3);
-        assert ensureMappers(ID_1, split1);
-        assert ensureMappers(ID_2, split2);
-        assert ensureMappers(ID_3, split3);
-        assert ensureReducers(ID_1, 1);
-        assert ensureReducers(ID_2, 1);
-        assert ensureReducers(ID_3, 1);
-
-        plan(5, split1, split2, split3);
-        assert ensureMappers(ID_1, split1);
-        assert ensureMappers(ID_2, split2);
-        assert ensureMappers(ID_3, split3);
-        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 2) && ensureReducers(ID_3, 2) ||
-            ensureReducers(ID_1, 2) && ensureReducers(ID_2, 1) && ensureReducers(ID_3, 2) ||
-            ensureReducers(ID_1, 2) && ensureReducers(ID_2, 2) && ensureReducers(ID_3, 1);
-    }
-
-    /**
-     * @throws IgniteCheckedException If failed.
-     */
-    public void testNonIgfsOneBlockPerNode() throws IgniteCheckedException {
-        HadoopFileBlock split1 = split(false, "/file1", 0, 100, HOST_1);
-        HadoopFileBlock split2 = split(false, "/file2", 0, 100, HOST_2);
-        HadoopFileBlock split3 = split(false, "/file3", 0, 100, HOST_3);
-
-        plan(1, split1);
-        assert ensureMappers(ID_1, split1);
-        assert ensureReducers(ID_1, 1);
-        assert ensureEmpty(ID_2);
-        assert ensureEmpty(ID_3);
-
-        plan(2, split1);
-        assert ensureMappers(ID_1, split1);
-        assert ensureReducers(ID_1, 2);
-        assert ensureEmpty(ID_2);
-        assert ensureEmpty(ID_3);
-
-        plan(1, split1, split2);
-        assert ensureMappers(ID_1, split1);
-        assert ensureMappers(ID_2, split2);
-        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 0) || ensureReducers(ID_1, 0) && ensureReducers(ID_2, 1);
-        assert ensureEmpty(ID_3);
-
-        plan(2, split1, split2);
-        assert ensureMappers(ID_1, split1);
-        assert ensureMappers(ID_2, split2);
-        assert ensureReducers(ID_1, 1);
-        assert ensureReducers(ID_2, 1);
-        assert ensureEmpty(ID_3);
-
-        plan(3, split1, split2);
-        assert ensureMappers(ID_1, split1);
-        assert ensureMappers(ID_2, split2);
-        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 2) || ensureReducers(ID_1, 2) && ensureReducers(ID_2, 1);
-        assert ensureEmpty(ID_3);
-
-        plan(3, split1, split2, split3);
-        assert ensureMappers(ID_1, split1);
-        assert ensureMappers(ID_2, split2);
-        assert ensureMappers(ID_3, split3);
-        assert ensureReducers(ID_1, 1);
-        assert ensureReducers(ID_2, 1);
-        assert ensureReducers(ID_3, 1);
-
-        plan(5, split1, split2, split3);
-        assert ensureMappers(ID_1, split1);
-        assert ensureMappers(ID_2, split2);
-        assert ensureMappers(ID_3, split3);
-        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 2) && ensureReducers(ID_3, 2) ||
-            ensureReducers(ID_1, 2) && ensureReducers(ID_2, 1) && ensureReducers(ID_3, 2) ||
-            ensureReducers(ID_1, 2) && ensureReducers(ID_2, 2) && ensureReducers(ID_3, 1);
-    }
-
-    /**
-     * @throws IgniteCheckedException If failed.
-     */
-    public void testIgfsSeveralBlocksPerNode() throws IgniteCheckedException {
-        HadoopFileBlock split1 = split(true, "/file1", 0, 100, HOST_1, HOST_2);
-        HadoopFileBlock split2 = split(true, "/file2", 0, 100, HOST_1, HOST_2);
-        HadoopFileBlock split3 = split(true, "/file3", 0, 100, HOST_1, HOST_3);
-
-        mapIgfsBlock(split1.file(), 0, 100, location(0, 100, ID_1, ID_2));
-        mapIgfsBlock(split2.file(), 0, 100, location(0, 100, ID_1, ID_2));
-        mapIgfsBlock(split3.file(), 0, 100, location(0, 100, ID_1, ID_3));
-
-        plan(1, split1);
-        assert ensureMappers(ID_1, split1) && ensureReducers(ID_1, 1) && ensureEmpty(ID_2) ||
-            ensureEmpty(ID_1) && ensureMappers(ID_2, split1) && ensureReducers(ID_2, 1);
-        assert ensureEmpty(ID_3);
-
-        plan(2, split1);
-        assert ensureMappers(ID_1, split1) && ensureReducers(ID_1, 2) && ensureEmpty(ID_2) ||
-            ensureEmpty(ID_1) && ensureMappers(ID_2, split1) && ensureReducers(ID_2, 2);
-        assert ensureEmpty(ID_3);
-
-        plan(1, split1, split2);
-        assert ensureMappers(ID_1, split1) && ensureMappers(ID_2, split2) ||
-            ensureMappers(ID_1, split2) && ensureMappers(ID_2, split1);
-        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 0) || ensureReducers(ID_1, 0) && ensureReducers(ID_2, 1);
-        assert ensureEmpty(ID_3);
-
-        plan(2, split1, split2);
-        assert ensureMappers(ID_1, split1) && ensureMappers(ID_2, split2) ||
-            ensureMappers(ID_1, split2) && ensureMappers(ID_2, split1);
-        assert ensureReducers(ID_1, 1);
-        assert ensureReducers(ID_2, 1);
-        assert ensureEmpty(ID_3);
-
-        plan(3, split1, split2, split3);
-        assert ensureReducers(ID_1, 1);
-        assert ensureReducers(ID_2, 1);
-        assert ensureReducers(ID_3, 1);
-
-        plan(5, split1, split2, split3);
-        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 2) && ensureReducers(ID_3, 2) ||
-            ensureReducers(ID_1, 2) && ensureReducers(ID_2, 1) && ensureReducers(ID_3, 2) ||
-            ensureReducers(ID_1, 2) && ensureReducers(ID_2, 2) && ensureReducers(ID_3, 1);
-    }
-
-    /**
-     * @throws IgniteCheckedException If failed.
-     */
-    public void testNonIgfsSeveralBlocksPerNode() throws IgniteCheckedException {
-        HadoopFileBlock split1 = split(false, "/file1", 0, 100, HOST_1, HOST_2);
-        HadoopFileBlock split2 = split(false, "/file2", 0, 100, HOST_1, HOST_2);
-        HadoopFileBlock split3 = split(false, "/file3", 0, 100, HOST_1, HOST_3);
-
-        plan(1, split1);
-        assert ensureMappers(ID_1, split1) && ensureReducers(ID_1, 1) && ensureEmpty(ID_2) ||
-            ensureEmpty(ID_1) && ensureMappers(ID_2, split1) && ensureReducers(ID_2, 1);
-        assert ensureEmpty(ID_3);
-
-        plan(2, split1);
-        assert ensureMappers(ID_1, split1) && ensureReducers(ID_1, 2) && ensureEmpty(ID_2) ||
-            ensureEmpty(ID_1) && ensureMappers(ID_2, split1) && ensureReducers(ID_2, 2);
-        assert ensureEmpty(ID_3);
-
-        plan(1, split1, split2);
-        assert ensureMappers(ID_1, split1) && ensureMappers(ID_2, split2) ||
-            ensureMappers(ID_1, split2) && ensureMappers(ID_2, split1);
-        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 0) || ensureReducers(ID_1, 0) && ensureReducers(ID_2, 1);
-        assert ensureEmpty(ID_3);
-
-        plan(2, split1, split2);
-        assert ensureMappers(ID_1, split1) && ensureMappers(ID_2, split2) ||
-            ensureMappers(ID_1, split2) && ensureMappers(ID_2, split1);
-        assert ensureReducers(ID_1, 1);
-        assert ensureReducers(ID_2, 1);
-        assert ensureEmpty(ID_3);
-
-        plan(3, split1, split2, split3);
-        assert ensureReducers(ID_1, 1);
-        assert ensureReducers(ID_2, 1);
-        assert ensureReducers(ID_3, 1);
-
-        plan(5, split1, split2, split3);
-        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 2) && ensureReducers(ID_3, 2) ||
-            ensureReducers(ID_1, 2) && ensureReducers(ID_2, 1) && ensureReducers(ID_3, 2) ||
-            ensureReducers(ID_1, 2) && ensureReducers(ID_2, 2) && ensureReducers(ID_3, 1);
-    }
-
-    /**
-     * @throws IgniteCheckedException If failed.
-     */
-    public void testIgfsSeveralComplexBlocksPerNode() throws IgniteCheckedException {
-        HadoopFileBlock split1 = split(true, "/file1", 0, 100, HOST_1, HOST_2, HOST_3);
-        HadoopFileBlock split2 = split(true, "/file2", 0, 100, HOST_1, HOST_2, HOST_3);
-
-        mapIgfsBlock(split1.file(), 0, 100, location(0, 50, ID_1, ID_2), location(51, 100, ID_1, ID_3));
-        mapIgfsBlock(split2.file(), 0, 100, location(0, 50, ID_1, ID_2), location(51, 100, ID_2, ID_3));
-
-        plan(1, split1);
-        assert ensureMappers(ID_1, split1);
-        assert ensureReducers(ID_1, 1);
-        assert ensureEmpty(ID_2);
-        assert ensureEmpty(ID_3);
-
-        plan(1, split2);
-        assert ensureMappers(ID_2, split2);
-        assert ensureReducers(ID_2, 1);
-        assert ensureEmpty(ID_1);
-        assert ensureEmpty(ID_3);
-
-        plan(1, split1, split2);
-        assert ensureMappers(ID_1, split1);
-        assert ensureMappers(ID_2, split2);
-        assert ensureReducers(ID_1, 0) && ensureReducers(ID_2, 1) || ensureReducers(ID_1, 1) && ensureReducers(ID_2, 0);
-        assert ensureEmpty(ID_3);
-
-        plan(2, split1, split2);
-        assert ensureMappers(ID_1, split1);
-        assert ensureMappers(ID_2, split2);
-        assert ensureReducers(ID_1, 1);
-        assert ensureReducers(ID_2, 1);
-        assert ensureEmpty(ID_3);
-    }
-
-    /**
-     * @throws IgniteCheckedException If failed.
-     */
-    public void testNonIgfsOrphans() throws IgniteCheckedException {
-        HadoopFileBlock split1 = split(false, "/file1", 0, 100, INVALID_HOST_1, INVALID_HOST_2);
-        HadoopFileBlock split2 = split(false, "/file2", 0, 100, INVALID_HOST_1, INVALID_HOST_3);
-        HadoopFileBlock split3 = split(false, "/file3", 0, 100, INVALID_HOST_2, INVALID_HOST_3);
-
-        plan(1, split1);
-        assert ensureMappers(ID_1, split1) && ensureReducers(ID_1, 1) && ensureEmpty(ID_2) && ensureEmpty(ID_3) ||
-            ensureEmpty(ID_1) && ensureMappers(ID_2, split1) && ensureReducers(ID_2, 1) && ensureEmpty(ID_3) ||
-            ensureEmpty(ID_1) && ensureEmpty(ID_2) && ensureMappers(ID_3, split1) && ensureReducers(ID_3, 1);
-
-        plan(2, split1);
-        assert ensureMappers(ID_1, split1) && ensureReducers(ID_1, 2) && ensureEmpty(ID_2) && ensureEmpty(ID_3) ||
-            ensureEmpty(ID_1) && ensureMappers(ID_2, split1) && ensureReducers(ID_2, 2) && ensureEmpty(ID_3) ||
-            ensureEmpty(ID_1) && ensureEmpty(ID_2) && ensureMappers(ID_3, split1) && ensureReducers(ID_3, 2);
-
-        plan(1, split1, split2, split3);
-        assert ensureMappers(ID_1, split1) && ensureMappers(ID_2, split2) && ensureMappers(ID_3, split3) ||
-            ensureMappers(ID_1, split1) && ensureMappers(ID_2, split3) && ensureMappers(ID_3, split2) ||
-            ensureMappers(ID_1, split2) && ensureMappers(ID_2, split1) && ensureMappers(ID_3, split3) ||
-            ensureMappers(ID_1, split2) && ensureMappers(ID_2, split3) && ensureMappers(ID_3, split1) ||
-            ensureMappers(ID_1, split3) && ensureMappers(ID_2, split1) && ensureMappers(ID_3, split2) ||
-            ensureMappers(ID_1, split3) && ensureMappers(ID_2, split2) && ensureMappers(ID_3, split1);
-        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 0) && ensureReducers(ID_3, 0) ||
-            ensureReducers(ID_1, 0) && ensureReducers(ID_2, 1) && ensureReducers(ID_3, 0) ||
-            ensureReducers(ID_1, 0) && ensureReducers(ID_2, 0) && ensureReducers(ID_3, 1);
-
-        plan(3, split1, split2, split3);
-        assert ensureMappers(ID_1, split1) && ensureMappers(ID_2, split2) && ensureMappers(ID_3, split3) ||
-            ensureMappers(ID_1, split1) && ensureMappers(ID_2, split3) && ensureMappers(ID_3, split2) ||
-            ensureMappers(ID_1, split2) && ensureMappers(ID_2, split1) && ensureMappers(ID_3, split3) ||
-            ensureMappers(ID_1, split2) && ensureMappers(ID_2, split3) && ensureMappers(ID_3, split1) ||
-            ensureMappers(ID_1, split3) && ensureMappers(ID_2, split1) && ensureMappers(ID_3, split2) ||
-            ensureMappers(ID_1, split3) && ensureMappers(ID_2, split2) && ensureMappers(ID_3, split1);
-        assert ensureReducers(ID_1, 1);
-        assert ensureReducers(ID_2, 1);
-        assert ensureReducers(ID_3, 1);
-
-        plan(5, split1, split2, split3);
-        assert ensureMappers(ID_1, split1) && ensureMappers(ID_2, split2) && ensureMappers(ID_3, split3) ||
-            ensureMappers(ID_1, split1) && ensureMappers(ID_2, split3) && ensureMappers(ID_3, split2) ||
-            ensureMappers(ID_1, split2) && ensureMappers(ID_2, split1) && ensureMappers(ID_3, split3) ||
-            ensureMappers(ID_1, split2) && ensureMappers(ID_2, split3) && ensureMappers(ID_3, split1) ||
-            ensureMappers(ID_1, split3) && ensureMappers(ID_2, split1) && ensureMappers(ID_3, split2) ||
-            ensureMappers(ID_1, split3) && ensureMappers(ID_2, split2) && ensureMappers(ID_3, split1);
-        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 2) && ensureReducers(ID_3, 2) ||
-            ensureReducers(ID_1, 2) && ensureReducers(ID_2, 1) && ensureReducers(ID_3, 2) ||
-            ensureReducers(ID_1, 2) && ensureReducers(ID_2, 2) && ensureReducers(ID_3, 1);
-    }
-
-    /**
-     * Create plan.
-     *
-     * @param reducers Reducers count.
-     * @param splits Splits.
-     * @return Plan.
-     * @throws IgniteCheckedException If failed.
-     */
-    private static HadoopMapReducePlan plan(int reducers, HadoopInputSplit... splits) throws IgniteCheckedException {
-        assert reducers > 0;
-        assert splits != null && splits.length > 0;
-
-        Collection<HadoopInputSplit> splitList = new ArrayList<>(splits.length);
-
-        Collections.addAll(splitList, splits);
-
-        Collection<ClusterNode> top = new ArrayList<>();
-
-        GridTestNode node1 = new GridTestNode(ID_1);
-        GridTestNode node2 = new GridTestNode(ID_2);
-        GridTestNode node3 = new GridTestNode(ID_3);
-
-        node1.setHostName(HOST_1);
-        node2.setHostName(HOST_2);
-        node3.setHostName(HOST_3);
-
-        top.add(node1);
-        top.add(node2);
-        top.add(node3);
-
-        HadoopMapReducePlan plan = PLANNER.preparePlan(new HadoopPlannerMockJob(splitList, reducers), top, null);
-
-        PLAN.set(plan);
-
-        return plan;
-    }
-
-    /**
-     * Ensure that the node contains the given mappers.
-     *
-     * @param nodeId Node ID.
-     * @param expSplits Expected splits.
-     * @return {@code True} if this assumption is valid.
-     */
-    private static boolean ensureMappers(UUID nodeId, HadoopInputSplit... expSplits) {
-        Collection<HadoopInputSplit> expSplitsCol = new ArrayList<>();
-
-        Collections.addAll(expSplitsCol, expSplits);
-
-        Collection<HadoopInputSplit> splits = PLAN.get().mappers(nodeId);
-
-        return F.eq(expSplitsCol, splits);
-    }
-
-    /**
-     * Ensure that the node contains the given number of reducers.
-     *
-     * @param nodeId Node ID.
-     * @param reducers Reducers.
-     * @return {@code True} if this assumption is valid.
-     */
-    private static boolean ensureReducers(UUID nodeId, int reducers) {
-        int[] reducersArr = PLAN.get().reducers(nodeId);
-
-        return reducers == 0 ? F.isEmpty(reducersArr) : (reducersArr != null && reducersArr.length == reducers);
-    }
-
-    /**
-     * Ensure that no mappers or reducers are located on this node.
-     *
-     * @param nodeId Node ID.
-     * @return {@code True} if this assumption is valid.
-     */
-    private static boolean ensureEmpty(UUID nodeId) {
-        return F.isEmpty(PLAN.get().mappers(nodeId)) && F.isEmpty(PLAN.get().reducers(nodeId));
-    }
-
-    /**
-     * Create split.
-     *
-     * @param igfs IGFS flag.
-     * @param file File.
-     * @param start Start.
-     * @param len Length.
-     * @param hosts Hosts.
-     * @return Split.
-     */
-    private static HadoopFileBlock split(boolean igfs, String file, long start, long len, String... hosts) {
-        URI uri = URI.create((igfs ? "igfs://igfs@" : "hdfs://") + file);
-
-        return new HadoopFileBlock(hosts, uri, start, len);
-    }
-
-    /**
-     * Create block location.
-     *
-     * @param start Start.
-     * @param len Length.
-     * @param nodeIds Node IDs.
-     * @return Block location.
-     */
-    private static IgfsBlockLocation location(long start, long len, UUID... nodeIds) {
-        assert nodeIds != null && nodeIds.length > 0;
-
-        Collection<ClusterNode> nodes = new ArrayList<>(nodeIds.length);
-
-        for (UUID id : nodeIds)
-            nodes.add(new GridTestNode(id));
-
-        return new IgfsBlockLocationImpl(start, len, nodes);
-    }
-
-    /**
-     * Map IGFS block to nodes.
-     *
-     * @param file File.
-     * @param start Start.
-     * @param len Length.
-     * @param locations Locations.
-     */
-    private static void mapIgfsBlock(URI file, long start, long len, IgfsBlockLocation... locations) {
-        assert locations != null && locations.length > 0;
-
-        IgfsPath path = new IgfsPath(file);
-
-        Block block = new Block(path, start, len);
-
-        Collection<IgfsBlockLocation> locationsList = new ArrayList<>();
-
-        Collections.addAll(locationsList, locations);
-
-        BLOCK_MAP.put(block, locationsList);
-    }
-
-    /**
-     * Block.
-     */
-    private static class Block {
-        /** */
-        private final IgfsPath path;
-
-        /** */
-        private final long start;
-
-        /** */
-        private final long len;
-
-        /**
-         * Constructor.
-         *
-         * @param path Path.
-         * @param start Start.
-         * @param len Length.
-         */
-        private Block(IgfsPath path, long start, long len) {
-            this.path = path;
-            this.start = start;
-            this.len = len;
-        }
-
-        /** {@inheritDoc} */
-        @SuppressWarnings("RedundantIfStatement")
-        @Override public boolean equals(Object o) {
-            if (this == o) return true;
-            if (!(o instanceof Block)) return false;
-
-            Block block = (Block) o;
-
-            if (len != block.len)
-                return false;
-
-            if (start != block.start)
-                return false;
-
-            if (!path.equals(block.path))
-                return false;
-
-            return true;
-        }
-
-        /** {@inheritDoc} */
-        @Override public int hashCode() {
-            int res = path.hashCode();
-
-            res = 31 * res + (int) (start ^ (start >>> 32));
-            res = 31 * res + (int) (len ^ (len >>> 32));
-
-            return res;
-        }
-    }
-
-    /**
-     * Mocked IGFS.
-     */
-    private static class MockIgfs extends IgfsMock {
-        /**
-         * Constructor.
-         */
-        public MockIgfs() {
-            super("igfs");
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean isProxy(URI path) {
-            return PROXY_MAP.containsKey(path) && PROXY_MAP.get(path);
-        }
-
-        /** {@inheritDoc} */
-        @Override public Collection<IgfsBlockLocation> affinity(IgfsPath path, long start, long len) {
-            return BLOCK_MAP.get(new Block(path, start, len));
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean exists(IgfsPath path) {
-            return true;
-        }
-    }
-}
\ No newline at end of file
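
Since the test above drives IgniteHadoopMapReducePlanner.preparePlan() directly, a short sketch of interrogating the resulting plan may help; it assumes the splitList and top collections built in the plan() helper above, and uses org.apache.ignite.internal.util.typedef.X purely for diagnostic output:

    HadoopMapReducePlan plan = PLANNER.preparePlan(new HadoopPlannerMockJob(splitList, 2), top, null);

    for (ClusterNode node : top) {
        Collection<HadoopInputSplit> mappers = plan.mappers(node.id()); // Splits assigned to this node.
        int[] reducers = plan.reducers(node.id());                      // Reducer indices on this node.

        X.println(node.id() + " -> mappers=" + (mappers == null ? 0 : mappers.size()) +
            ", reducers=" + (reducers == null ? 0 : reducers.length));
    }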

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopErrorSimulator.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopErrorSimulator.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopErrorSimulator.java
deleted file mode 100644
index 843b42b..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopErrorSimulator.java
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicReference;
-
-/**
- * Error simulator.
- */
-public class HadoopErrorSimulator {
-    /** No-op singleton instance. */
-    public static final HadoopErrorSimulator noopInstance = new HadoopErrorSimulator();
-
-    /** Instance ref. */
-    private static final AtomicReference<HadoopErrorSimulator> ref = new AtomicReference<>(noopInstance);
-
-    /**
-     * Creates simulator of given kind with given stage bits.
-     *
-     * @param kind The kind.
-     * @param bits The stage bits.
-     * @return The simulator.
-     */
-    public static HadoopErrorSimulator create(Kind kind, int bits) {
-        switch (kind) {
-            case Noop:
-                return noopInstance;
-            case Runtime:
-                return new RuntimeExceptionBitHadoopErrorSimulator(bits);
-            case IOException:
-                return new IOExceptionBitHadoopErrorSimulator(bits);
-            case Error:
-                return new ErrorBitHadoopErrorSimulator(bits);
-            default:
-                throw new IllegalStateException("Unknown kind: " + kind);
-        }
-    }
-
-    /**
-     * Gets the current error simulator instance.
-     *
-     * @return The instance.
-     */
-    public static HadoopErrorSimulator instance() {
-        return ref.get();
-    }
-
-    /**
-     * Atomically sets the instance.
-     *
-     * @param expect Expected current instance.
-     * @param update New instance.
-     * @return {@code True} if the instance was replaced.
-     */
-    public static boolean setInstance(HadoopErrorSimulator expect, HadoopErrorSimulator update) {
-        return ref.compareAndSet(expect, update);
-    }
-
-    /**
-     * Constructor.
-     */
-    private HadoopErrorSimulator() {
-        // no-op
-    }
-
-    /**
-     * Invoked on the named stage.
-     */
-    public void onMapConfigure() {
-        // no-op
-    }
-
-    /**
-     * Invoked on the named stage.
-     */
-    public void onMapSetup() throws IOException, InterruptedException {
-        // no-op
-    }
-
-    /**
-     * Invoked on the named stage.
-     */
-    public void onMap() throws IOException {
-        // no-op
-    }
-
-    /**
-     * Invoked on the named stage.
-     */
-    public void onMapCleanup() throws IOException, InterruptedException {
-        // no-op
-    }
-
-    /**
-     * Invoked on the named stage.
-     */
-    public void onMapClose() throws IOException {
-        // no-op
-    }
-
-    /**
-     * setConf() does not declare IOException to be thrown.
-     */
-    public void onCombineConfigure() {
-        // no-op
-    }
-
-    /**
-     * Invoked on the named stage.
-     */
-    public void onCombineSetup() throws IOException, InterruptedException {
-        // no-op
-    }
-
-    /**
-     * Invoked on the named stage.
-     */
-    public void onCombine() throws IOException {
-        // no-op
-    }
-
-    /**
-     * Invoked on the named stage.
-     */
-    public void onCombineCleanup() throws IOException, InterruptedException {
-        // no-op
-    }
-
-    /**
-     * Invoked on the named stage.
-     */
-    public void onReduceConfigure() {
-        // no-op
-    }
-
-    /**
-     * Invoked on the named stage.
-     */
-    public void onReduceSetup() throws IOException, InterruptedException {
-        // no-op
-    }
-
-    /**
-     * Invoked on the named stage.
-     */
-    public void onReduce() throws IOException {
-        // no-op
-    }
-
-    /**
-     * Invoked on the named stage.
-     */
-    public void onReduceCleanup() throws IOException, InterruptedException {
-        // no-op
-    }
-
-    /**
-     * Error kind.
-     */
-    public enum Kind {
-        /** No error. */
-        Noop,
-
-        /** Runtime. */
-        Runtime,
-
-        /** IOException. */
-        IOException,
-
-        /** java.lang.Error. */
-        Error
-    }
-
-    /**
-     * Runtime error simulator.
-     */
-    public static class RuntimeExceptionBitHadoopErrorSimulator extends HadoopErrorSimulator {
-        /** Stage bits: define which map-reduce stages will cause errors. */
-        private final int bits;
-
-        /**
-         * Constructor.
-         *
-         * @param b Stage bits.
-         */
-        protected RuntimeExceptionBitHadoopErrorSimulator(int b) {
-            bits = b;
-        }
-
-        /**
-         * Simulates an error.
-         */
-        protected void simulateError() throws IOException {
-            throw new RuntimeException("An error simulated by " + getClass().getSimpleName());
-        }
-
-        /** {@inheritDoc} */
-        @Override public final void onMapConfigure() {
-            try {
-                if ((bits & 1) != 0)
-                    simulateError();
-            }
-            catch (IOException e) {
-                // ignore
-            }
-        }
-
-        /** {@inheritDoc} */
-        @Override public final void onMapSetup() throws IOException, InterruptedException {
-            if ((bits & 2) != 0)
-                simulateError();
-        }
-
-        /** {@inheritDoc} */
-        @Override public final void onMap() throws IOException {
-            if ((bits & 4) != 0)
-                simulateError();
-        }
-
-        /** {@inheritDoc} */
-        @Override public final void onMapCleanup() throws IOException, InterruptedException {
-            if ((bits & 8) != 0)
-                simulateError();
-        }
-
-        /** {@inheritDoc} */
-        @Override public final void onCombineConfigure() {
-            try {
-                if ((bits & 16) != 0)
-                    simulateError();
-            }
-            catch (IOException e) {
-                // ignore
-            }
-        }
-
-        /** {@inheritDoc} */
-        @Override public final void onCombineSetup() throws IOException, InterruptedException {
-            if ((bits & 32) != 0)
-                simulateError();
-        }
-
-        /** {@inheritDoc} */
-        @Override public final void onCombine() throws IOException {
-            if ((bits & 64) != 0)
-                simulateError();
-        }
-
-        /** {@inheritDoc} */
-        @Override public final void onCombineCleanup() throws IOException, InterruptedException {
-            if ((bits & 128) != 0)
-                simulateError();
-        }
-
-        /** {@inheritDoc} */
-        @Override public final void onReduceConfigure() {
-            try {
-                if ((bits & 256) != 0)
-                    simulateError();
-            }
-            catch (IOException e) {
-                // ignore
-            }
-        }
-
-        /** {@inheritDoc} */
-        @Override public final void onReduceSetup() throws IOException, InterruptedException {
-            if ((bits & 512) != 0)
-                simulateError();
-        }
-
-        /** {@inheritDoc} */
-        @Override public final void onReduce() throws IOException {
-            if ((bits & 1024) != 0)
-                simulateError();
-        }
-
-        /** {@inheritDoc} */
-        @Override public final void onReduceCleanup() throws IOException, InterruptedException {
-            if ((bits & 2048) != 0)
-                simulateError();
-        }
-    }
-
-    /**
-     * java.lang.Error simulator.
-     */
-    public static class ErrorBitHadoopErrorSimulator extends RuntimeExceptionBitHadoopErrorSimulator {
-        /**
-         * Constructor.
-         *
-         * @param bits Stage bits.
-         */
-        public ErrorBitHadoopErrorSimulator(int bits) {
-            super(bits);
-        }
-
-        /** {@inheritDoc} */
-        @Override protected void simulateError() {
-            throw new Error("An error simulated by " + getClass().getSimpleName());
-        }
-    }
-
-    /**
-     * IOException simulator.
-     */
-    public static class IOExceptionBitHadoopErrorSimulator extends RuntimeExceptionBitHadoopErrorSimulator {
-        /**
-         * Constructor.
-         *
-         * @param bits Stage bits.
-         */
-        public IOExceptionBitHadoopErrorSimulator(int bits) {
-            super(bits);
-        }
-
-        /** {@inheritDoc} */
-        @Override protected void simulateError() throws IOException {
-            throw new IOException("An IOException simulated by " + getClass().getSimpleName());
-        }
-    }
-}
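
A note on the bit layout used by the simulators above: each callback owns one bit, in declaration order (1 = map configure, 2 = map setup, 4 = map, 8 = map cleanup, 16/32/64/128 = the combine stages, 256/512/1024/2048 = the reduce stages). A sketch of installing a simulator that fails only the map and reduce stages with IOException, using only the API shown above:

    // Fail onMap() (bit 4) and onReduce() (bit 1024).
    HadoopErrorSimulator sim = HadoopErrorSimulator.create(HadoopErrorSimulator.Kind.IOException, 4 | 1024);

    // Atomically swap in the failing simulator, then restore the no-op one afterwards.
    if (HadoopErrorSimulator.setInstance(HadoopErrorSimulator.noopInstance, sim)) {
        try {
            // ... run the job under test; instance().onMap() and onReduce() now throw ...
        }
        finally {
            HadoopErrorSimulator.setInstance(sim, HadoopErrorSimulator.noopInstance);
        }
    }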

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopFileSystemsTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopFileSystemsTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopFileSystemsTest.java
deleted file mode 100644
index 946ba77..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopFileSystemsTest.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemsUtils;
-import org.apache.ignite.testframework.GridTestUtils;
-
-/**
- * Tests file systems for multi-threaded working directory support.
- */
-public class HadoopFileSystemsTest extends HadoopAbstractSelfTest {
-    /** The number of threads. */
-    private static final int THREAD_COUNT = 3;
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTest() throws Exception {
-        startGrids(gridCount());
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTest() throws Exception {
-        stopAllGrids(true);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected boolean igfsEnabled() {
-        return true;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected int gridCount() {
-        return 1;
-    }
-
-    /**
-     * Tests the file system with the specified URI for multi-threaded working directory support.
-     *
-     * @param uri Base URI of the file system (scheme and authority).
-     * @throws Exception If failed.
-     */
-    private void testFileSystem(final URI uri) throws Exception {
-        final Configuration cfg = new Configuration();
-
-        setupFileSystems(cfg);
-
-        cfg.set(HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP,
-            new Path(new Path(uri), "user/" + System.getProperty("user.name")).toString());
-
-        final CountDownLatch changeUserPhase = new CountDownLatch(THREAD_COUNT);
-        final CountDownLatch changeDirPhase = new CountDownLatch(THREAD_COUNT);
-        final CountDownLatch changeAbsDirPhase = new CountDownLatch(THREAD_COUNT);
-        final CountDownLatch finishPhase = new CountDownLatch(THREAD_COUNT);
-
-        final Path[] newUserInitWorkDir = new Path[THREAD_COUNT];
-        final Path[] newWorkDir = new Path[THREAD_COUNT];
-        final Path[] newAbsWorkDir = new Path[THREAD_COUNT];
-        final Path[] newInstanceWorkDir = new Path[THREAD_COUNT];
-
-        final AtomicInteger threadNum = new AtomicInteger(0);
-
-        GridTestUtils.runMultiThreadedAsync(new Runnable() {
-            @Override public void run() {
-                try {
-                    int curThreadNum = threadNum.getAndIncrement();
-
-                    if ("file".equals(uri.getScheme()))
-                        FileSystem.get(uri, cfg).setWorkingDirectory(new Path("file:///user/user" + curThreadNum));
-
-                    changeUserPhase.countDown();
-                    changeUserPhase.await();
-
-                    newUserInitWorkDir[curThreadNum] = FileSystem.get(uri, cfg).getWorkingDirectory();
-
-                    FileSystem.get(uri, cfg).setWorkingDirectory(new Path("folder" + curThreadNum));
-
-                    changeDirPhase.countDown();
-                    changeDirPhase.await();
-
-                    newWorkDir[curThreadNum] = FileSystem.get(uri, cfg).getWorkingDirectory();
-
-                    FileSystem.get(uri, cfg).setWorkingDirectory(new Path("/folder" + curThreadNum));
-
-                    changeAbsDirPhase.countDown();
-                    changeAbsDirPhase.await();
-
-                    newAbsWorkDir[curThreadNum] = FileSystem.get(uri, cfg).getWorkingDirectory();
-
-                    newInstanceWorkDir[curThreadNum] = FileSystem.newInstance(uri, cfg).getWorkingDirectory();
-
-                    finishPhase.countDown();
-                }
-                catch (InterruptedException | IOException e) {
-                    error("Failed to execute test thread.", e);
-
-                    fail();
-                }
-            }
-        }, THREAD_COUNT, "filesystems-test");
-
-        finishPhase.await();
-
-        for (int i = 0; i < THREAD_COUNT; i ++) {
-            cfg.set(MRJobConfig.USER_NAME, "user" + i);
-
-            Path workDir = new Path(new Path(uri), "user/user" + i);
-
-            cfg.set(HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP, workDir.toString());
-
-            assertEquals(workDir, FileSystem.newInstance(uri, cfg).getWorkingDirectory());
-
-            assertEquals(workDir, newUserInitWorkDir[i]);
-
-            assertEquals(new Path(new Path(uri), "user/user" + i + "/folder" + i), newWorkDir[i]);
-
-            assertEquals(new Path("/folder" + i), newAbsWorkDir[i]);
-
-            assertEquals(new Path(new Path(uri), "user/" + System.getProperty("user.name")), newInstanceWorkDir[i]);
-        }
-
-        System.out.println(System.getProperty("user.dir"));
-    }
-
-    /**
-     * Tests multi-threaded working directory support on the local file system.
-     *
-     * @throws Exception If failed.
-     */
-    public void testLocal() throws Exception {
-        testFileSystem(URI.create("file:///"));
-    }
-}
\ No newline at end of file
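
The assertions above hinge on the contract between FileSystem.get(), which returns a cached instance whose working directory survives across calls, and FileSystem.newInstance(), which returns a fresh instance starting from the configured default. A condensed sketch of that contract with the same Hadoop classes; note that the per-thread isolation the test additionally verifies depends on the Ignite wiring installed by setupFileSystems(cfg):

    Configuration cfg = new Configuration();
    URI uri = URI.create("file:///");

    FileSystem cached = FileSystem.get(uri, cfg);
    cached.setWorkingDirectory(new Path("/folder0"));

    // The cached instance remembers the change...
    assert new Path("/folder0").equals(cached.getWorkingDirectory());

    // ...while a fresh instance starts from the configured default directory.
    FileSystem fresh = FileSystem.newInstance(uri, cfg);
    assert !new Path("/folder0").equals(fresh.getWorkingDirectory());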

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopGroupingTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopGroupingTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopGroupingTest.java
deleted file mode 100644
index db87e33..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopGroupingTest.java
+++ /dev/null
@@ -1,307 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Random;
-import java.util.Set;
-import java.util.UUID;
-import org.apache.hadoop.io.RawComparator;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.mapreduce.InputFormat;
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.OutputCommitter;
-import org.apache.hadoop.mapreduce.OutputFormat;
-import org.apache.hadoop.mapreduce.RecordReader;
-import org.apache.hadoop.mapreduce.RecordWriter;
-import org.apache.hadoop.mapreduce.Reducer;
-import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.ignite.configuration.HadoopConfiguration;
-import org.apache.ignite.internal.util.GridConcurrentHashSet;
-import org.apache.ignite.internal.util.GridRandom;
-import org.apache.ignite.internal.util.typedef.X;
-import org.apache.ignite.internal.util.typedef.internal.S;
-
-import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.createJobInfo;
-
-/**
- * Grouping test.
- */
-public class HadoopGroupingTest extends HadoopAbstractSelfTest {
-    /** */
-    private static final String PATH_OUTPUT = "/test-out";
-
-    /** */
-    private static final GridConcurrentHashSet<UUID> vals = HadoopSharedMap.map(HadoopGroupingTest.class)
-        .put("vals", new GridConcurrentHashSet<UUID>());
-
-    /** {@inheritDoc} */
-    @Override protected int gridCount() {
-        return 3;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected boolean igfsEnabled() {
-        return false;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTest() throws Exception {
-        startGrids(gridCount());
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTest() throws Exception {
-        stopAllGrids(true);
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopConfiguration hadoopConfiguration(String gridName) {
-        HadoopConfiguration cfg = super.hadoopConfiguration(gridName);
-
-        // TODO: IGNITE-404: Uncomment when fixed.
-        //cfg.setExternalExecution(false);
-
-        return cfg;
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testGroupingReducer() throws Exception {
-        doTestGrouping(false);
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testGroupingCombiner() throws Exception {
-        doTestGrouping(true);
-    }
-
-    /**
-     * @param combiner Whether to use a combiner.
-     * @throws Exception If failed.
-     */
-    public void doTestGrouping(boolean combiner) throws Exception {
-        vals.clear();
-
-        Job job = Job.getInstance();
-
-        job.setInputFormatClass(InFormat.class);
-        job.setOutputFormatClass(OutFormat.class);
-
-        job.setOutputKeyClass(YearTemperature.class);
-        job.setOutputValueClass(Text.class);
-
-        job.setMapperClass(Mapper.class);
-
-        if (combiner) {
-            job.setCombinerClass(MyReducer.class);
-            job.setNumReduceTasks(0);
-            job.setCombinerKeyGroupingComparatorClass(YearComparator.class);
-        }
-        else {
-            job.setReducerClass(MyReducer.class);
-            job.setNumReduceTasks(4);
-            job.setGroupingComparatorClass(YearComparator.class);
-        }
-
-        grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 2),
-            createJobInfo(job.getConfiguration())).get(30000);
-
-        assertTrue(vals.isEmpty());
-    }
-
-    /**
-     * Reducer that checks grouping: each reduce() call receives all values for a single year.
-     */
-    public static class MyReducer extends Reducer<YearTemperature, Text, Text, Object> {
-        /** */
-        int lastYear;
-
-        /** {@inheritDoc} */
-        @Override protected void reduce(YearTemperature key, Iterable<Text> vals0, Context context)
-            throws IOException, InterruptedException {
-            X.println("___ : " + context.getTaskAttemptID() + " --> " + key);
-
-            Set<UUID> ids = new HashSet<>();
-
-            for (Text val : vals0)
-                assertTrue(ids.add(UUID.fromString(val.toString())));
-
-            for (Text val : vals0)
-                assertTrue(ids.remove(UUID.fromString(val.toString())));
-
-            assertTrue(ids.isEmpty());
-
-            assertTrue(key.year > lastYear);
-
-            lastYear = key.year;
-
-            for (Text val : vals0)
-                assertTrue(vals.remove(UUID.fromString(val.toString())));
-        }
-    }
-
-    /**
-     * Grouping comparator: compares keys by year only.
-     */
-    public static class YearComparator implements RawComparator<YearTemperature> {
-        /** {@inheritDoc} */
-        @Override public int compare(YearTemperature o1, YearTemperature o2) {
-            return Integer.compare(o1.year, o2.year);
-        }
-
-        /** {@inheritDoc} */
-        @Override public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
-            throw new IllegalStateException();
-        }
-    }
-
-    /**
-     * Composite key sorted by year ascending, then temperature descending.
-     */
-    public static class YearTemperature implements WritableComparable<YearTemperature>, Cloneable {
-        /** */
-        private int year;
-
-        /** */
-        private int temperature;
-
-        /** {@inheritDoc} */
-        @Override public void write(DataOutput out) throws IOException {
-            out.writeInt(year);
-            out.writeInt(temperature);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void readFields(DataInput in) throws IOException {
-            year = in.readInt();
-            temperature = in.readInt();
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean equals(Object o) {
-            throw new IllegalStateException();
-        }
-
-        /** {@inheritDoc} */
-        @Override public int hashCode() { // To be partitioned by year.
-            return year;
-        }
-
-        /** {@inheritDoc} */
-        @Override public int compareTo(YearTemperature o) {
-            int res = Integer.compare(year, o.year);
-
-            if (res != 0)
-                return res;
-
-            // Within equal years, sort by temperature descending so the maximum comes first.
-            return Integer.compare(o.temperature, temperature);
-        }
-
-        /** {@inheritDoc} */
-        @Override public String toString() {
-            return S.toString(YearTemperature.class, this);
-        }
-    }
-
-    /**
-     * Input format producing random (year, temperature) keys with unique UUID values.
-     */
-    public static class InFormat extends InputFormat<YearTemperature, Text> {
-        /** {@inheritDoc} */
-        @Override public List<InputSplit> getSplits(JobContext context) throws IOException, InterruptedException {
-            ArrayList<InputSplit> list = new ArrayList<>();
-
-            for (int i = 0; i < 10; i++)
-                list.add(new HadoopSortingTest.FakeSplit(20));
-
-            return list;
-        }
-
-        /** {@inheritDoc} */
-        @Override public RecordReader<YearTemperature, Text> createRecordReader(final InputSplit split,
-            TaskAttemptContext context) throws IOException, InterruptedException {
-            return new RecordReader<YearTemperature, Text>() {
-                /** */
-                int cnt;
-
-                /** */
-                Random rnd = new GridRandom();
-
-                /** */
-                YearTemperature key = new YearTemperature();
-
-                /** */
-                Text val = new Text();
-
-                @Override public void initialize(InputSplit split, TaskAttemptContext context) {
-                    // No-op.
-                }
-
-                @Override public boolean nextKeyValue() throws IOException, InterruptedException {
-                    return cnt++ < split.getLength();
-                }
-
-                @Override public YearTemperature getCurrentKey() {
-                    key.year = 1990 + rnd.nextInt(10);
-                    key.temperature = 10 + rnd.nextInt(20);
-
-                    return key;
-                }
-
-                @Override public Text getCurrentValue() {
-                    UUID id = UUID.randomUUID();
-
-                    assertTrue(vals.add(id));
-
-                    val.set(id.toString());
-
-                    return val;
-                }
-
-                @Override public float getProgress() {
-                    return 0;
-                }
-
-                @Override public void close() {
-                    // No-op.
-                }
-            };
-        }
-    }
-
-    /**
-     * Output format stub that produces no output.
-     */
-    public static class OutFormat extends OutputFormat {
-        /** {@inheritDoc} */
-        @Override public RecordWriter getRecordWriter(TaskAttemptContext context) throws IOException, InterruptedException {
-            return null;
-        }
-
-        /** {@inheritDoc} */
-        @Override public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException {
-            // No-op.
-        }
-
-        /** {@inheritDoc} */
-        @Override public OutputCommitter getOutputCommitter(TaskAttemptContext context) throws IOException, InterruptedException {
-            return null;
-        }
-    }
-}
\ No newline at end of file
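
For readers less familiar with the secondary-sort idiom this test exercises: YearTemperature.compareTo() orders records by year ascending and temperature descending, while YearComparator declares two keys equal whenever their years match, so each reduce() invocation receives exactly one year with its hottest temperature first. The essential wiring, condensed from the reducer branch of doTestGrouping() above:

    Job job = Job.getInstance();

    job.setMapperClass(Mapper.class);                  // Identity mapper.
    job.setReducerClass(MyReducer.class);
    job.setNumReduceTasks(4);

    job.setOutputKeyClass(YearTemperature.class);      // Sort: year asc, temperature desc.
    job.setOutputValueClass(Text.class);

    // Group by year only: all values for a year reach a single reduce() call.
    job.setGroupingComparatorClass(YearComparator.class);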

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopJobTrackerSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopJobTrackerSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopJobTrackerSelfTest.java
deleted file mode 100644
index 9e268b7..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopJobTrackerSelfTest.java
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.InputFormat;
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.RecordReader;
-import org.apache.hadoop.mapreduce.Reducer;
-import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.mapreduce.lib.input.FileSplit;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.ignite.configuration.HadoopConfiguration;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.IgniteKernal;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.createJobInfo;
-
-/**
- * Job tracker self test.
- */
-public class HadoopJobTrackerSelfTest extends HadoopAbstractSelfTest {
-    /** */
-    private static final String PATH_OUTPUT = "/test-out";
-
-    /** Test block count. */
-    private static final int BLOCK_CNT = 10;
-
-    /** */
-    private static HadoopSharedMap m = HadoopSharedMap.map(HadoopJobTrackerSelfTest.class);
-
-    /** Map task execution count. */
-    private static final AtomicInteger mapExecCnt = m.put("mapExecCnt", new AtomicInteger());
-
-    /** Reduce task execution count. */
-    private static final AtomicInteger reduceExecCnt = m.put("reduceExecCnt", new AtomicInteger());
-
-    /** Combine task execution count. */
-    private static final AtomicInteger combineExecCnt = m.put("combineExecCnt", new AtomicInteger());
-
-    /** */
-    private static final Map<String, CountDownLatch> latch = m.put("latch", new HashMap<String, CountDownLatch>());
-
-    /** {@inheritDoc} */
-    @Override protected boolean igfsEnabled() {
-        return true;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTestsStarted() throws Exception {
-        super.beforeTestsStarted();
-
-        startGrids(gridCount());
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTestsStopped() throws Exception {
-        stopAllGrids();
-
-        super.afterTestsStopped();
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTest() throws Exception {
-        latch.put("mapAwaitLatch", new CountDownLatch(1));
-        latch.put("reduceAwaitLatch", new CountDownLatch(1));
-        latch.put("combineAwaitLatch", new CountDownLatch(1));
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTest() throws Exception {
-        mapExecCnt.set(0);
-        combineExecCnt.set(0);
-        reduceExecCnt.set(0);
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopConfiguration hadoopConfiguration(String gridName) {
-        HadoopConfiguration cfg = super.hadoopConfiguration(gridName);
-
-        cfg.setMapReducePlanner(new HadoopTestRoundRobinMrPlanner());
-
-        // TODO: IGNITE-404: Uncomment when fixed.
-        //cfg.setExternalExecution(false);
-
-        return cfg;
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testSimpleTaskSubmit() throws Exception {
-        try {
-            UUID globalId = UUID.randomUUID();
-
-            Job job = Job.getInstance();
-            setupFileSystems(job.getConfiguration());
-
-            job.setMapperClass(TestMapper.class);
-            job.setReducerClass(TestReducer.class);
-            job.setInputFormatClass(InFormat.class);
-
-            FileOutputFormat.setOutputPath(job, new Path(igfsScheme() + PATH_OUTPUT + "1"));
-
-            HadoopJobId jobId = new HadoopJobId(globalId, 1);
-
-            grid(0).hadoop().submit(jobId, createJobInfo(job.getConfiguration()));
-
-            checkStatus(jobId, false);
-
-            info("Releasing map latch.");
-
-            latch.get("mapAwaitLatch").countDown();
-
-            checkStatus(jobId, false);
-
-            info("Releasing reduce latch.");
-
-            latch.get("reduceAwaitLatch").countDown();
-
-            checkStatus(jobId, true);
-
-            assertEquals(10, mapExecCnt.get());
-            assertEquals(0, combineExecCnt.get());
-            assertEquals(1, reduceExecCnt.get());
-        }
-        finally {
-            // Safety.
-            latch.get("mapAwaitLatch").countDown();
-            latch.get("combineAwaitLatch").countDown();
-            latch.get("reduceAwaitLatch").countDown();
-        }
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testTaskWithCombinerPerMap() throws Exception {
-        try {
-            UUID globalId = UUID.randomUUID();
-
-            Job job = Job.getInstance();
-            setupFileSystems(job.getConfiguration());
-
-            job.setMapperClass(TestMapper.class);
-            job.setReducerClass(TestReducer.class);
-            job.setCombinerClass(TestCombiner.class);
-            job.setInputFormatClass(InFormat.class);
-
-            FileOutputFormat.setOutputPath(job, new Path(igfsScheme() + PATH_OUTPUT + "2"));
-
-            HadoopJobId jobId = new HadoopJobId(globalId, 1);
-
-            grid(0).hadoop().submit(jobId, createJobInfo(job.getConfiguration()));
-
-            checkStatus(jobId, false);
-
-            info("Releasing map latch.");
-
-            latch.get("mapAwaitLatch").countDown();
-
-            checkStatus(jobId, false);
-
-            // All maps are completed. We have a combiner, so no reducers should be executed
-            // before combiner latch is released.
-
-            U.sleep(50);
-
-            assertEquals(0, reduceExecCnt.get());
-
-            info("Releasing combiner latch.");
-
-            latch.get("combineAwaitLatch").countDown();
-
-            checkStatus(jobId, false);
-
-            info("Releasing reduce latch.");
-
-            latch.get("reduceAwaitLatch").countDown();
-
-            checkStatus(jobId, true);
-
-            assertEquals(10, mapExecCnt.get());
-            assertEquals(10, combineExecCnt.get());
-            assertEquals(1, reduceExecCnt.get());
-        }
-        finally {
-            // Safety.
-            latch.get("mapAwaitLatch").countDown();
-            latch.get("combineAwaitLatch").countDown();
-            latch.get("reduceAwaitLatch").countDown();
-        }
-    }
-
-    /**
-     * Checks job execution status.
-     *
-     * @param jobId Job ID.
-     * @param complete Whether the job is expected to be complete.
-     * @throws Exception If failed.
-     */
-    private void checkStatus(HadoopJobId jobId, boolean complete) throws Exception {
-        for (int i = 0; i < gridCount(); i++) {
-            IgniteKernal kernal = (IgniteKernal)grid(i);
-
-            Hadoop hadoop = kernal.hadoop();
-
-            HadoopJobStatus stat = hadoop.status(jobId);
-
-            assert stat != null;
-
-            IgniteInternalFuture<?> fut = hadoop.finishFuture(jobId);
-
-            if (!complete)
-                assertFalse(fut.isDone());
-            else {
-                info("Waiting for status future completion on node [idx=" + i + ", nodeId=" +
-                    kernal.getLocalNodeId() + ']');
-
-                fut.get();
-            }
-        }
-    }
-
-    /**
-     * Test input format.
-     */
-    public static class InFormat extends InputFormat {
-
-        @Override public List<InputSplit> getSplits(JobContext ctx) throws IOException, InterruptedException {
-            List<InputSplit> res = new ArrayList<>(BLOCK_CNT);
-
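-            // Produce BLOCK_CNT synthetic splits over a dummy file, all located on localhost.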
-            for (int i = 0; i < BLOCK_CNT; i++)
-                try {
-                    res.add(new FileSplit(new Path(new URI("someFile")), i, i + 1, new String[] {"localhost"}));
-                }
-                catch (URISyntaxException e) {
-                    throw new IOException(e);
-                }
-
-            return res;
-        }
-
-        @Override public RecordReader createRecordReader(InputSplit split, TaskAttemptContext ctx) throws IOException, InterruptedException {
-            return new RecordReader() {
-                @Override public void initialize(InputSplit split, TaskAttemptContext ctx) {
-                }
-
-                @Override public boolean nextKeyValue() {
-                    return false;
-                }
-
-                @Override public Object getCurrentKey() {
-                    return null;
-                }
-
-                @Override public Object getCurrentValue() {
-                    return null;
-                }
-
-                @Override public float getProgress() {
-                    return 0;
-                }
-
-                @Override public void close() {
-
-                }
-            };
-        }
-    }
-
-    /**
-     * Test mapper.
-     */
-    private static class TestMapper extends Mapper {
-        @Override public void run(Context ctx) throws IOException, InterruptedException {
-            System.out.println("Running task: " + ctx.getTaskAttemptID().getTaskID().getId());
-
-            latch.get("mapAwaitLatch").await();
-
-            mapExecCnt.incrementAndGet();
-
-            System.out.println("Completed task: " + ctx.getTaskAttemptID().getTaskID().getId());
-        }
-    }
-
-    /**
-     * Test reducer.
-     */
-    private static class TestReducer extends Reducer {
-        @Override public void run(Context ctx) throws IOException, InterruptedException {
-            System.out.println("Running task: " + ctx.getTaskAttemptID().getTaskID().getId());
-
-            latch.get("reduceAwaitLatch").await();
-
-            reduceExecCnt.incrementAndGet();
-
-            System.out.println("Completed task: " + ctx.getTaskAttemptID().getTaskID().getId());
-        }
-    }
-
-    /**
-     * Test combiner.
-     */
-    private static class TestCombiner extends Reducer {
-        @Override public void run(Context ctx) throws IOException, InterruptedException {
-            System.out.println("Running task: " + ctx.getTaskAttemptID().getTaskID().getId());
-
-            latch.get("combineAwaitLatch").await();
-
-            combineExecCnt.incrementAndGet();
-
-            System.out.println("Completed task: " + ctx.getTaskAttemptID().getTaskID().getId());
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceEmbeddedSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceEmbeddedSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceEmbeddedSelfTest.java
deleted file mode 100644
index 25ef382..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceEmbeddedSelfTest.java
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.UUID;
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.serializer.WritableSerialization;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.JobConfigurable;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.ignite.configuration.HadoopConfiguration;
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.processors.hadoop.examples.HadoopWordCount1;
-import org.apache.ignite.internal.processors.hadoop.examples.HadoopWordCount2;
-
-import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.createJobInfo;
-
-/**
- * Tests map-reduce execution with embedded mode.
- */
-public class HadoopMapReduceEmbeddedSelfTest extends HadoopMapReduceTest {
-    /** */
-    private static Map<String, Boolean> flags = HadoopSharedMap.map(HadoopMapReduceEmbeddedSelfTest.class)
-        .put("flags", new HashMap<String, Boolean>());
-
-    /** {@inheritDoc} */
-    @Override public HadoopConfiguration hadoopConfiguration(String gridName) {
-        HadoopConfiguration cfg = super.hadoopConfiguration(gridName);
-
-        // TODO: IGNITE-404: Uncomment when fixed.
-        //cfg.setExternalExecution(false);
-
-        return cfg;
-    }
-
-    /**
-     * Tests whole job execution through all phases in both old and new API versions, with custom
-     * Serialization, Partitioner and IO formats defined.
-     *
-     * @throws Exception If failed.
-     */
-    public void testMultiReducerWholeMapReduceExecution() throws Exception {
-        IgfsPath inDir = new IgfsPath(PATH_INPUT);
-
-        igfs.mkdirs(inDir);
-
-        IgfsPath inFile = new IgfsPath(inDir, HadoopWordCount2.class.getSimpleName() + "-input");
-
-        generateTestFile(inFile.toString(), "key1", 10000, "key2", 20000, "key3", 15000, "key4", 7000, "key5", 12000,
-            "key6", 18000 );
-
-        for (int i = 0; i < 2; i++) {
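-            // First iteration exercises the old API, the second one the new API.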
-            boolean useNewAPI = i == 1;
-
-            igfs.delete(new IgfsPath(PATH_OUTPUT), true);
-
-            flags.put("serializationWasConfigured", false);
-            flags.put("partitionerWasConfigured", false);
-            flags.put("inputFormatWasConfigured", false);
-            flags.put("outputFormatWasConfigured", false);
-
-            JobConf jobConf = new JobConf();
-
-            jobConf.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, CustomSerialization.class.getName());
-
-            //To split into about 6-7 items for v2
-            jobConf.setInt(FileInputFormat.SPLIT_MAXSIZE, 65000);
-
-            //For v1
-            jobConf.setInt("fs.local.block.size", 65000);
-
-            // File system coordinates.
-            setupFileSystems(jobConf);
-
-            HadoopWordCount1.setTasksClasses(jobConf, !useNewAPI, !useNewAPI, !useNewAPI);
-
-            if (!useNewAPI) {
-                jobConf.setPartitionerClass(CustomV1Partitioner.class);
-                jobConf.setInputFormat(CustomV1InputFormat.class);
-                jobConf.setOutputFormat(CustomV1OutputFormat.class);
-            }
-
-            Job job = Job.getInstance(jobConf);
-
-            HadoopWordCount2.setTasksClasses(job, useNewAPI, useNewAPI, useNewAPI, false);
-
-            if (useNewAPI) {
-                job.setPartitionerClass(CustomV2Partitioner.class);
-                job.setInputFormatClass(CustomV2InputFormat.class);
-                job.setOutputFormatClass(CustomV2OutputFormat.class);
-            }
-
-            job.setOutputKeyClass(Text.class);
-            job.setOutputValueClass(IntWritable.class);
-
-            FileInputFormat.setInputPaths(job, new Path(igfsScheme() + inFile.toString()));
-            FileOutputFormat.setOutputPath(job, new Path(igfsScheme() + PATH_OUTPUT));
-
-            job.setNumReduceTasks(3);
-
-            job.setJarByClass(HadoopWordCount2.class);
-
-            IgniteInternalFuture<?> fut = grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 1),
-                    createJobInfo(job.getConfiguration()));
-
-            fut.get();
-
-            assertTrue("Serialization was configured (new API is " + useNewAPI + ")",
-                 flags.get("serializationWasConfigured"));
-
-            assertTrue("Partitioner was configured (new API is = " + useNewAPI + ")",
-                 flags.get("partitionerWasConfigured"));
-
-            assertTrue("Input format was configured (new API is = " + useNewAPI + ")",
-                 flags.get("inputFormatWasConfigured"));
-
-            assertTrue("Output format was configured (new API is = " + useNewAPI + ")",
-                 flags.get("outputFormatWasConfigured"));
-
-            assertEquals("Use new API = " + useNewAPI,
-                "key3\t15000\n" +
-                "key6\t18000\n",
-                readAndSortFile(PATH_OUTPUT + "/" + (useNewAPI ? "part-r-" : "part-") + "00000")
-            );
-
-            assertEquals("Use new API = " + useNewAPI,
-                "key1\t10000\n" +
-                "key4\t7000\n",
-                readAndSortFile(PATH_OUTPUT + "/" + (useNewAPI ? "part-r-" : "part-") + "00001")
-            );
-
-            assertEquals("Use new API = " + useNewAPI,
-                "key2\t20000\n" +
-                "key5\t12000\n",
-                readAndSortFile(PATH_OUTPUT + "/" + (useNewAPI ? "part-r-" : "part-") + "00002")
-            );
-
-        }
-    }
-
-    /**
-     * Custom serialization class that inherits behaviour of native {@link WritableSerialization}.
-     */
-    protected static class CustomSerialization extends WritableSerialization {
-        @Override public void setConf(Configuration conf) {
-            super.setConf(conf);
-
-            flags.put("serializationWasConfigured", true);
-        }
-    }
-
-    /**
-     * Custom implementation of Partitioner in v1 API.
-     */
-    private static class CustomV1Partitioner extends org.apache.hadoop.mapred.lib.HashPartitioner {
-        /** {@inheritDoc} */
-        @Override public void configure(JobConf job) {
-            flags.put("partitionerWasConfigured", true);
-        }
-    }
-
-    /**
-     * Custom implementation of Partitioner in v2 API.
-     */
-    private static class CustomV2Partitioner extends org.apache.hadoop.mapreduce.lib.partition.HashPartitioner
-            implements Configurable {
-        /** {@inheritDoc} */
-        @Override public void setConf(Configuration conf) {
-            flags.put("partitionerWasConfigured", true);
-        }
-
-        /** {@inheritDoc} */
-        @Override public Configuration getConf() {
-            return null;
-        }
-    }
-
-    /**
-     * Custom implementation of InputFormat in v2 API.
-     */
-    private static class CustomV2InputFormat extends org.apache.hadoop.mapreduce.lib.input.TextInputFormat implements Configurable {
-        /** {@inheritDoc} */
-        @Override public void setConf(Configuration conf) {
-            flags.put("inputFormatWasConfigured", true);
-        }
-
-        /** {@inheritDoc} */
-        @Override public Configuration getConf() {
-            return null;
-        }
-    }
-
-    /**
-     * Custom implementation of OutputFormat in v2 API.
-     */
-    private static class CustomV2OutputFormat extends org.apache.hadoop.mapreduce.lib.output.TextOutputFormat implements Configurable {
-        /** {@inheritDoc} */
-        @Override public void setConf(Configuration conf) {
-            flags.put("outputFormatWasConfigured", true);
-        }
-
-        /** {@inheritDoc} */
-        @Override public Configuration getConf() {
-            return null;
-        }
-    }
-
-    /**
-     * Custom implementation of InputFormat in v1 API.
-     */
-    private static class CustomV1InputFormat extends org.apache.hadoop.mapred.TextInputFormat {
-        /** {@inheritDoc} */
-        @Override public void configure(JobConf job) {
-            super.configure(job);
-
-            flags.put("inputFormatWasConfigured", true);
-        }
-    }
-
-    /**
-     * Custom implementation of OutputFormat in v1 API.
-     */
-    private static class CustomV1OutputFormat extends org.apache.hadoop.mapred.TextOutputFormat implements JobConfigurable {
-        /** {@inheritDoc} */
-        @Override public void configure(JobConf job) {
-            flags.put("outputFormatWasConfigured", true);
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceErrorResilienceTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceErrorResilienceTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceErrorResilienceTest.java
deleted file mode 100644
index dd12935..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceErrorResilienceTest.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.internal.processors.hadoop.examples.HadoopWordCount2;
-
-/**
- * Tests error resilience after a failure during map-reduce job execution.
- * Combinations tested:
- * { new API, old API }
- *   x { unchecked exception, checked exception, error }
- *   x { phase where the error happens }.
- */
-public class HadoopMapReduceErrorResilienceTest extends HadoopAbstractMapReduceTest {
-    /**
-     * Tests recovery.
-     *
-     * @throws Exception If failed.
-     */
-    public void testRecoveryAfterAnError0_Runtime() throws Exception {
-        doTestRecoveryAfterAnError(0, HadoopErrorSimulator.Kind.Runtime);
-    }
-
-    /**
-     * Tests recovery.
-     *
-     * @throws Exception If failed.
-     */
-    public void testRecoveryAfterAnError0_IOException() throws Exception {
-        doTestRecoveryAfterAnError(0, HadoopErrorSimulator.Kind.IOException);
-    }
-
-    /**
-     * Tests recovery.
-     *
-     * @throws Exception If failed.
-     */
-    public void testRecoveryAfterAnError0_Error() throws Exception {
-        doTestRecoveryAfterAnError(0, HadoopErrorSimulator.Kind.Error);
-    }
-
-    /**
-     * Tests recovery.
-     *
-     * @throws Exception If failed.
-     */
-    public void testRecoveryAfterAnError7_Runtime() throws Exception {
-        doTestRecoveryAfterAnError(7, HadoopErrorSimulator.Kind.Runtime);
-    }
-    /**
-     * Tests recovery.
-     *
-     * @throws Exception If failed.
-     */
-    public void testRecoveryAfterAnError7_IOException() throws Exception {
-        doTestRecoveryAfterAnError(7, HadoopErrorSimulator.Kind.IOException);
-    }
-    /**
-     * Tests recovery.
-     *
-     * @throws Exception If failed.
-     */
-    public void testRecoveryAfterAnError7_Error() throws Exception {
-        doTestRecoveryAfterAnError(7, HadoopErrorSimulator.Kind.Error);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected long getTestTimeout() {
-        return 10 * 60 * 1000L;
-    }
-
-    /**
-     * Tests correct work after an error.
-     *
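-     * @param useNewBits Bit mask selecting the API version per phase; a cleared bit selects the new API
-     *      (bit 0: mapper, bit 1: combiner, bit 2: reducer).
-     * @param simulatorKind Kind of error to simulate.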
-     * @throws Exception On error.
-     */
-    private void doTestRecoveryAfterAnError(int useNewBits, HadoopErrorSimulator.Kind simulatorKind) throws Exception {
-        try {
-            IgfsPath inDir = new IgfsPath(PATH_INPUT);
-
-            igfs.mkdirs(inDir);
-
-            IgfsPath inFile = new IgfsPath(inDir, HadoopWordCount2.class.getSimpleName() + "-input");
-
-            generateTestFile(inFile.toString(), "red", red, "blue", blue, "green", green, "yellow", yellow);
-
-            boolean useNewMapper = (useNewBits & 1) == 0;
-            boolean useNewCombiner = (useNewBits & 2) == 0;
-            boolean useNewReducer = (useNewBits & 4) == 0;
-
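-            // Walk through the error-injection stages; each iteration enables exactly one stage bit.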
-            for (int i = 0; i < 12; i++) {
-                int bits = 1 << i;
-
-                System.out.println("############################ Simulator kind = " + simulatorKind
-                    + ", Stage bits = " + bits);
-
-                HadoopErrorSimulator sim = HadoopErrorSimulator.create(simulatorKind, bits);
-
-                doTestWithErrorSimulator(sim, inFile, useNewMapper, useNewCombiner, useNewReducer);
-            }
-        }
-        catch (Throwable t) {
-            t.printStackTrace();
-
-            fail("Unexpected throwable: " + t);
-        }
-    }
-
-    /**
-     * Performs test with given error simulator.
-     *
-     * @param sim The simulator.
-     * @param inFile Input file.
-     * @param useNewMapper Whether to use the new mapper API.
-     * @param useNewCombiner Whether to use the new combiner API.
-     * @param useNewReducer Whether to use the new reducer API.
-     * @throws Exception If failed.
-     */
-    private void doTestWithErrorSimulator(HadoopErrorSimulator sim, IgfsPath inFile, boolean useNewMapper,
-        boolean useNewCombiner, boolean useNewReducer) throws Exception {
-        // Set real simulating error simulator:
-        assertTrue(HadoopErrorSimulator.setInstance(HadoopErrorSimulator.noopInstance, sim));
-
-        try {
-            // Expect failure there:
-            doTest(inFile, useNewMapper, useNewCombiner, useNewReducer);
-        }
-        catch (Throwable t) { // This may be an Error.
-            // Expected:
-            System.out.println(t.toString()); // Ignore, continue the test.
-        }
-
-        // Set no-op error simulator:
-        assertTrue(HadoopErrorSimulator.setInstance(sim, HadoopErrorSimulator.noopInstance));
-
-        // Expect success there:
-        doTest(inFile, useNewMapper, useNewCombiner, useNewReducer);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceTest.java
deleted file mode 100644
index b703896..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceTest.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.internal.processors.hadoop.examples.HadoopWordCount2;
-
-/**
- * Tests the whole cycle of map-reduce processing via the job tracker.
- */
-public class HadoopMapReduceTest extends HadoopAbstractMapReduceTest {
-    /**
-     * Tests whole job execution with all phases in all combinations of new and old API versions.
-     *
-     * @throws Exception If failed.
-     */
-    public void testWholeMapReduceExecution() throws Exception {
-        IgfsPath inDir = new IgfsPath(PATH_INPUT);
-
-        igfs.mkdirs(inDir);
-
-        IgfsPath inFile = new IgfsPath(inDir, HadoopWordCount2.class.getSimpleName() + "-input");
-
-        generateTestFile(inFile.toString(), "red", red, "blue", blue, "green", green, "yellow", yellow );
-
-        for (boolean[] apiMode: getApiModes()) {
-            assert apiMode.length == 3;
-
-            boolean useNewMapper = apiMode[0];
-            boolean useNewCombiner = apiMode[1];
-            boolean useNewReducer = apiMode[2];
-
-            doTest(inFile, useNewMapper, useNewCombiner, useNewReducer);
-        }
-    }
-
-    /**
-     * Gets API mode combinations to be tested.
-     * Each boolean[] is a { newMapper, newCombiner, newReducer } flag triplet.
-     *
-     * @return Arrays of booleans indicating API combinations to test.
-     */
-    protected boolean[][] getApiModes() {
-        return new boolean[][] {
-            { false, false, false },
-            { false, false, true },
-            { false, true,  false },
-            { true,  false, false },
-            { true,  true,  true },
-        };
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopNoHadoopMapReduceTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopNoHadoopMapReduceTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopNoHadoopMapReduceTest.java
deleted file mode 100644
index 0c172c3..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopNoHadoopMapReduceTest.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import org.apache.ignite.configuration.IgniteConfiguration;
-
-/**
- * Tests an attempt to execute a map-reduce task when no Hadoop processor is available.
- */
-public class HadoopNoHadoopMapReduceTest extends HadoopMapReduceTest {
-    /** {@inheritDoc} */
-    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
-        IgniteConfiguration c = super.getConfiguration(gridName);
-
-        c.setHadoopConfiguration(null);
-        c.setPeerClassLoadingEnabled(true);
-
-        return c;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void testWholeMapReduceExecution() throws Exception {
-        try {
-            super.testWholeMapReduceExecution();
-
-            fail("IllegalStateException expected.");
-        }
-        catch (IllegalStateException ignore) {
-            // No-op.
-        }
-    }
-}


[34/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemAbstractSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemAbstractSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemAbstractSelfTest.java
new file mode 100644
index 0000000..f793ec3
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemAbstractSelfTest.java
@@ -0,0 +1,2432 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.igfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathExistsException;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.ignite.GridTestIoUtils;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteFileSystem;
+import org.apache.ignite.cache.CacheWriteSynchronizationMode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.FileSystemConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.hadoop.fs.CachingHadoopFileSystemFactory;
+import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
+import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsEx;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsIpcIo;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsOutProc;
+import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils;
+import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
+import org.apache.ignite.internal.util.GridConcurrentHashSet;
+import org.apache.ignite.internal.util.lang.GridAbsPredicate;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.jetbrains.annotations.Nullable;
+import org.jsr166.ThreadLocalRandom8;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.lang.reflect.Field;
+import java.net.URI;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayDeque;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Deque;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.cache.CacheMode.REPLICATED;
+import static org.apache.ignite.events.EventType.EVT_JOB_MAPPED;
+import static org.apache.ignite.events.EventType.EVT_TASK_FAILED;
+import static org.apache.ignite.events.EventType.EVT_TASK_FINISHED;
+import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
+import static org.apache.ignite.igfs.IgfsMode.PROXY;
+
+/**
+ * Tests Hadoop file system implementation.
+ */
+@SuppressWarnings("all")
+public abstract class IgniteHadoopFileSystemAbstractSelfTest extends IgfsCommonAbstractTest {
+    /** Primary file system authority. */
+    private static final String PRIMARY_AUTHORITY = "igfs:grid0@";
+
+    /** Primary file system URI. */
+    private static final String PRIMARY_URI = "igfs://" + PRIMARY_AUTHORITY + "/";
+
+    /** Secondary file system authority. */
+    private static final String SECONDARY_AUTHORITY = "igfs_secondary:grid_secondary@127.0.0.1:11500";
+
+    /** Secondary file system URI. */
+    private static final String SECONDARY_URI = "igfs://" + SECONDARY_AUTHORITY + "/";
+
+    /** Secondary file system configuration path. */
+    private static final String SECONDARY_CFG_PATH = "/work/core-site-test.xml";
+
+    /** Secondary file system user. */
+    private static final String SECONDARY_FS_USER = "secondary-default";
+
+    /** Secondary endpoint configuration. */
+    protected static final IgfsIpcEndpointConfiguration SECONDARY_ENDPOINT_CFG;
+
+    /** Group size. */
+    public static final int GRP_SIZE = 128;
+
+    /** Path to the default hadoop configuration. */
+    public static final String HADOOP_FS_CFG = "examples/config/filesystem/core-site.xml";
+
+    /** Thread count for multithreaded tests. */
+    private static final int THREAD_CNT = 8;
+
+    /** IP finder. */
+    private final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** Barrier for multithreaded tests. */
+    private static CyclicBarrier barrier;
+
+    /** File system. */
+    private static FileSystem fs;
+
+    /** Default IGFS mode. */
+    protected final IgfsMode mode;
+
+    /** Skip embedded mode flag. */
+    private final boolean skipEmbed;
+
+    /** Skip local shmem flag. */
+    private final boolean skipLocShmem;
+
+    /** Endpoint. */
+    private final String endpoint;
+
+    /** Primary file system URI. */
+    protected URI primaryFsUri;
+
+    /** Primary file system configuration. */
+    protected Configuration primaryFsCfg;
+
+    static {
+        SECONDARY_ENDPOINT_CFG = new IgfsIpcEndpointConfiguration();
+
+        SECONDARY_ENDPOINT_CFG.setType(IgfsIpcEndpointType.TCP);
+        SECONDARY_ENDPOINT_CFG.setPort(11500);
+    }
+
+    /** File statuses comparator: directories first, then by path. */
+    private static final Comparator<FileStatus> STATUS_COMPARATOR = new Comparator<FileStatus>() {
+        @SuppressWarnings("deprecation")
+        @Override public int compare(FileStatus o1, FileStatus o2) {
+            if (o1 == null || o2 == null)
+                return o1 == o2 ? 0 : o1 == null ? -1 : 1;
+
+            return o1.isDir() == o2.isDir() ? o1.getPath().compareTo(o2.getPath()) : o1.isDir() ? -1 : 1;
+        }
+    };
+
+    /**
+     * Constructor.
+     *
+     * @param mode Default IGFS mode.
+     * @param skipEmbed Whether to skip embedded mode.
+     * @param skipLocShmem Whether to skip local shmem mode.
+     */
+    protected IgniteHadoopFileSystemAbstractSelfTest(IgfsMode mode, boolean skipEmbed, boolean skipLocShmem) {
+        this.mode = mode;
+        this.skipEmbed = skipEmbed;
+        this.skipLocShmem = skipLocShmem;
+
+        endpoint = skipLocShmem ? "127.0.0.1:10500" : "shmem:10500";
+    }
+
+    /**
+     * Gets the user the FS client operates on behalf of.
+     *
+     * @return The user the FS client operates on behalf of.
+     */
+    protected String getClientFsUser() {
+        return "foo";
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        Configuration secondaryConf = configuration(SECONDARY_AUTHORITY, true, true);
+
+        secondaryConf.setInt("fs.igfs.block.size", 1024);
+
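+        // Persist the secondary file system configuration so that it can later be loaded by path.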
+        String path = U.getIgniteHome() + SECONDARY_CFG_PATH;
+
+        File file = new File(path);
+
+        try (FileOutputStream fos = new FileOutputStream(file)) {
+            secondaryConf.writeXml(fos);
+        }
+
+        startNodes();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected long getTestTimeout() {
+        return 10 * 60 * 1000;
+    }
+
+    /**
+     * Starts the nodes for this test.
+     *
+     * @throws Exception If failed.
+     */
+    private void startNodes() throws Exception {
+        if (mode != PRIMARY) {
+            // Start secondary IGFS.
+            FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
+
+            igfsCfg.setDataCacheName("partitioned");
+            igfsCfg.setMetaCacheName("replicated");
+            igfsCfg.setName("igfs_secondary");
+            igfsCfg.setIpcEndpointConfiguration(SECONDARY_ENDPOINT_CFG);
+            igfsCfg.setBlockSize(512 * 1024);
+            igfsCfg.setPrefetchBlocks(1);
+
+            CacheConfiguration cacheCfg = defaultCacheConfiguration();
+
+            cacheCfg.setName("partitioned");
+            cacheCfg.setCacheMode(PARTITIONED);
+            cacheCfg.setNearConfiguration(null);
+            cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+            cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(GRP_SIZE));
+            cacheCfg.setBackups(0);
+            cacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+            CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
+
+            metaCacheCfg.setName("replicated");
+            metaCacheCfg.setCacheMode(REPLICATED);
+            metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+            metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+            IgniteConfiguration cfg = new IgniteConfiguration();
+
+            cfg.setGridName("grid_secondary");
+
+            TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+
+            discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
+
+            cfg.setDiscoverySpi(discoSpi);
+            cfg.setCacheConfiguration(metaCacheCfg, cacheCfg);
+            cfg.setFileSystemConfiguration(igfsCfg);
+            cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED);
+
+            G.start(cfg);
+        }
+
+        startGrids(4);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        G.stopAll(true);
+
+        String path = U.getIgniteHome() + SECONDARY_CFG_PATH;
+
+        new File(path).delete();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        primaryFsUri = new URI(PRIMARY_URI);
+
+        primaryFsCfg = configuration(PRIMARY_AUTHORITY, skipEmbed, skipLocShmem);
+
+        UserGroupInformation clientUgi = UserGroupInformation.getBestUGI(null, getClientFsUser());
+        assertNotNull(clientUgi);
+
+        // Create the Fs on behalf of the specific user:
+        clientUgi.doAs(new PrivilegedExceptionAction<Object>() {
+            @Override public Object run() throws Exception {
+                fs = FileSystem.get(primaryFsUri, primaryFsCfg);
+
+                return null;
+            }
+        });
+
+        barrier = new CyclicBarrier(THREAD_CNT);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        try {
+            HadoopIgfsUtils.clear(fs);
+        }
+        catch (Exception ignore) {
+            // No-op.
+        }
+
+        U.closeQuiet(fs);
+    }
+
+    /**
+     * Gets primary IPC endpoint configuration.
+     *
+     * @param gridName Grid name.
+     * @return IPC primary endpoint configuration.
+     */
+    protected abstract IgfsIpcEndpointConfiguration primaryIpcEndpointConfiguration(String gridName);
+
+    /** {@inheritDoc} */
+    @Override public String getTestGridName() {
+        return "grid";
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+
+        discoSpi.setIpFinder(IP_FINDER);
+
+        cfg.setDiscoverySpi(discoSpi);
+        cfg.setCacheConfiguration(cacheConfiguration(gridName));
+        cfg.setFileSystemConfiguration(igfsConfiguration(gridName));
+        cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED);
+
+        return cfg;
+    }
+
+    /**
+     * Gets cache configuration.
+     *
+     * @param gridName Grid name.
+     * @return Cache configuration.
+     */
+    protected CacheConfiguration[] cacheConfiguration(String gridName) {
+        CacheConfiguration cacheCfg = defaultCacheConfiguration();
+
+        cacheCfg.setName("partitioned");
+        cacheCfg.setCacheMode(PARTITIONED);
+        cacheCfg.setNearConfiguration(null);
+        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(GRP_SIZE));
+        cacheCfg.setBackups(0);
+        cacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
+
+        metaCacheCfg.setName("replicated");
+        metaCacheCfg.setCacheMode(REPLICATED);
+        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
+        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
+
+        return new CacheConfiguration[] {metaCacheCfg, cacheCfg};
+    }
+
+    /**
+     * Gets IGFS configuration.
+     *
+     * @param gridName Grid name.
+     * @return IGFS configuration.
+     */
+    protected FileSystemConfiguration igfsConfiguration(String gridName) throws IgniteCheckedException {
+        FileSystemConfiguration cfg = new FileSystemConfiguration();
+
+        cfg.setDataCacheName("partitioned");
+        cfg.setMetaCacheName("replicated");
+        cfg.setName("igfs");
+        cfg.setPrefetchBlocks(1);
+        cfg.setDefaultMode(mode);
+
+        if (mode != PRIMARY) {
+            CachingHadoopFileSystemFactory fac = new CachingHadoopFileSystemFactory();
+
+            fac.setUri(SECONDARY_URI);
+            fac.setConfigPaths(SECONDARY_CFG_PATH);
+
+            IgniteHadoopIgfsSecondaryFileSystem sec = new IgniteHadoopIgfsSecondaryFileSystem();
+
+            sec.setFileSystemFactory(fac);
+            sec.setDefaultUserName(SECONDARY_FS_USER);
+
+            // NB: start() will be invoked upon IgfsImpl init.
+            cfg.setSecondaryFileSystem(sec);
+        }
+
+        cfg.setIpcEndpointConfiguration(primaryIpcEndpointConfiguration(gridName));
+
+        cfg.setManagementPort(-1);
+        cfg.setBlockSize(512 * 1024); // Together with group blocks mapper will yield 64M per node groups.
+
+        return cfg;
+    }
+
+    /** @throws Exception If failed. */
+    public void testGetUriIfFSIsNotInitialized() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return new IgniteHadoopFileSystem().getUri();
+            }
+        }, IllegalStateException.class,
+            "URI is null (was IgniteHadoopFileSystem properly initialized?)");
+    }
+
+    /** @throws Exception If failed. */
+    @SuppressWarnings("NullableProblems")
+    public void testInitializeCheckParametersNameIsNull() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                new IgniteHadoopFileSystem().initialize(null, new Configuration());
+
+                return null;
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: name");
+    }
+
+    /** @throws Exception If failed. */
+    @SuppressWarnings("NullableProblems")
+    public void testInitializeCheckParametersCfgIsNull() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                new IgniteHadoopFileSystem().initialize(new URI(""), null);
+
+                return null;
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: cfg");
+    }
+
+    /** @throws Exception If failed. */
+    public void testInitialize() throws Exception {
+        final IgniteHadoopFileSystem fs = new IgniteHadoopFileSystem();
+
+        fs.initialize(primaryFsUri, primaryFsCfg);
+
+        // Check repeatable initialization.
+        try {
+            fs.initialize(primaryFsUri, primaryFsCfg);
+
+            fail();
+        }
+        catch (IOException e) {
+            assertTrue(e.getMessage().contains("File system is already initialized"));
+        }
+
+        assertEquals(primaryFsUri, fs.getUri());
+
+        assertEquals(0, fs.getUsed());
+
+        fs.close();
+    }
+
+    /**
+     * Test how IPC cache map works.
+     *
+     * @throws Exception If failed.
+     */
+    public void testIpcCache() throws Exception {
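+        // Extract the client implementation via reflection; the check below only applies to the out-of-process (IPC) client.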
+        HadoopIgfsEx hadoop = GridTestUtils.getFieldValue(fs, "rmtClient", "delegateRef", "value", "hadoop");
+
+        if (hadoop instanceof HadoopIgfsOutProc) {
+            FileSystem fsOther = null;
+
+            try {
+                Field field = HadoopIgfsIpcIo.class.getDeclaredField("ipcCache");
+
+                field.setAccessible(true);
+
+                Map<String, HadoopIgfsIpcIo> cache = (Map<String, HadoopIgfsIpcIo>)field.get(null);
+
+                Configuration cfg = configuration(PRIMARY_AUTHORITY, skipEmbed, skipLocShmem);
+
+                // We disable caching in order to obtain a new FileSystem instance.
+                cfg.setBoolean("fs.igfs.impl.disable.cache", true);
+
+                // Initial cache size.
+                int initSize = cache.size();
+
+                // Ensure that when IO is used by multiple file systems and one of them is closed, IO is not stopped.
+                fsOther = FileSystem.get(new URI(PRIMARY_URI), cfg);
+
+                assert fs != fsOther;
+
+                assertEquals(initSize, cache.size());
+
+                fsOther.close();
+
+                assertEquals(initSize, cache.size());
+
+                Field stopField = HadoopIgfsIpcIo.class.getDeclaredField("stopping");
+
+                stopField.setAccessible(true);
+
+                HadoopIgfsIpcIo io = null;
+
+                for (Map.Entry<String, HadoopIgfsIpcIo> ioEntry : cache.entrySet()) {
+                    if (endpoint.contains(ioEntry.getKey())) {
+                        io = ioEntry.getValue();
+
+                        break;
+                    }
+                }
+
+                assert io != null;
+
+                assert !(Boolean)stopField.get(io);
+
+                // Ensure that IO is stopped when nobody else needs it.
+                fs.close();
+
+                assert initSize >= cache.size();
+
+                assert (Boolean)stopField.get(io);
+            }
+            finally {
+                U.closeQuiet(fsOther);
+            }
+        }
+    }
+
+    /** @throws Exception If failed. */
+    public void testCloseIfNotInitialized() throws Exception {
+        final FileSystem fs = new IgniteHadoopFileSystem();
+
+        // Check close makes nothing harmful.
+        fs.close();
+    }
+
+    /** @throws Exception If failed. */
+    public void testClose() throws Exception {
+        final Path path = new Path("dir");
+
+        fs.close();
+
+        // Check double close makes nothing harmful.
+        fs.close();
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Nullable @Override public Object call() throws Exception {
+                fs.initialize(primaryFsUri, primaryFsCfg);
+
+                return null;
+            }
+        }, IOException.class, "File system is stopped.");
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Nullable @Override public Object call() throws Exception {
+                fs.setPermission(path, FsPermission.createImmutable((short)777));
+
+                return null;
+            }
+        }, IOException.class, "File system is stopped.");
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Nullable @Override public Object call() throws Exception {
+                fs.setOwner(path, "user", "group");
+
+                return null;
+            }
+        }, IOException.class, "File system is stopped.");
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.open(path, 256);
+            }
+        }, IOException.class, "File system is stopped.");
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.create(path);
+            }
+        }, IOException.class, "File system is stopped.");
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.append(path);
+            }
+        }, IOException.class, "File system is stopped.");
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.rename(path, new Path("newDir"));
+            }
+        }, IOException.class, "File system is stopped.");
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.delete(path, true);
+            }
+        }, IOException.class, "File system is stopped.");
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.listStatus(path);
+            }
+        }, IOException.class, "File system is stopped.");
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.mkdirs(path);
+            }
+        }, IOException.class, "File system is stopped.");
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.getFileStatus(path);
+            }
+        }, IOException.class, "File system is stopped.");
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.getFileBlockLocations(new FileStatus(1L, false, 1, 1L, 1L, new Path("path")), 0L, 256L);
+            }
+        }, IOException.class, "File system is stopped.");
+    }
+
+    /** @throws Exception If failed. */
+    public void testCreateCheckParameters() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.create(null);
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
+    }
+
+    /** @throws Exception If failed. */
+    @SuppressWarnings("deprecation")
+    public void testCreateBase() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
+        Path file = new Path(dir, "someFile");
+
+        assertPathDoesNotExist(fs, file);
+
+        FsPermission fsPerm = new FsPermission((short)644);
+
+        FSDataOutputStream os = fs.create(file, fsPerm, false, 1, (short)1, 1L, null);
+
+        // Try to write something in file.
+        os.write("abc".getBytes());
+
+        os.close();
+
+        // Check file status.
+        FileStatus fileStatus = fs.getFileStatus(file);
+
+        assertFalse(fileStatus.isDir());
+        assertEquals(file, fileStatus.getPath());
+        assertEquals(fsPerm, fileStatus.getPermission());
+    }
+
+    /** @throws Exception If failed. */
+    @SuppressWarnings("deprecation")
+    public void testCreateCheckOverwrite() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
+        final Path file = new Path(dir, "someFile");
+
+        FSDataOutputStream out = fs.create(file, FsPermission.getDefault(), false, 64 * 1024,
+            fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
+
+        out.close();
+
+        // Check intermediate directory permissions.
+        assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir).getPermission());
+        assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir.getParent()).getPermission());
+        assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir.getParent().getParent()).getPermission());
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.create(file, FsPermission.getDefault(), false, 1024, (short)1, 2048, null);
+            }
+        }, PathExistsException.class, null);
+
+        // Overwrite should be successful.
+        FSDataOutputStream out1 = fs.create(file, true);
+
+        out1.close();
+    }
+
+    /** @throws Exception If failed. */
+    public void testDeleteIfNoSuchPath() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
+
+        assertPathDoesNotExist(fs, dir);
+
+        assertFalse(fs.delete(dir, true));
+    }
+
+    /** @throws Exception If failed. */
+    public void testDeleteSuccessfulIfPathIsOpenedToRead() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path file = new Path(fsHome, "myFile");
+
+        FSDataOutputStream os = fs.create(file, false, 128);
+
+        final int cnt = 5 * FileSystemConfiguration.DFLT_BLOCK_SIZE; // Write 5 blocks.
+
+        for (int i = 0; i < cnt; i++)
+            os.writeInt(i);
+
+        os.close();
+
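+        // Open the file so a read is in progress when delete() below is called.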
+        final FSDataInputStream is = fs.open(file, -1);
+
+        for (int i = 0; i < cnt / 2; i++)
+            assertEquals(i, is.readInt());
+
+        assert fs.delete(file, false);
+
+        assert !fs.exists(file);
+
+        is.close();
+    }
+
+    /** @throws Exception If failed. */
+    public void testDeleteIfFilePathExists() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path file = new Path(fsHome, "myFile");
+
+        FSDataOutputStream os = fs.create(file);
+
+        os.close();
+
+        assertTrue(fs.delete(file, false));
+
+        assertPathDoesNotExist(fs, file);
+    }
+
+    /** @throws Exception If failed. */
+    public void testDeleteIfDirectoryPathExists() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
+
+        FSDataOutputStream os = fs.create(dir);
+
+        os.close();
+
+        assertTrue(fs.delete(dir, false));
+
+        assertPathDoesNotExist(fs, dir);
+    }
+
+    /** @throws Exception If failed. */
+    public void testDeleteFailsIfNonRecursive() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
+
+        fs.create(someDir3).close();
+
+        Path someDir2 = new Path(fsHome, "/someDir1/someDir2");
+
+        assertFalse(fs.delete(someDir2, false));
+
+        assertPathExists(fs, someDir2);
+        assertPathExists(fs, someDir3);
+    }
+
+    /** @throws Exception If failed. */
+    public void testDeleteRecursively() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
+
+        FSDataOutputStream os = fs.create(someDir3);
+
+        os.close();
+
+        Path someDir2 = new Path(fsHome, "/someDir1/someDir2");
+
+        assertTrue(fs.delete(someDir2, true));
+
+        assertPathDoesNotExist(fs, someDir2);
+        assertPathDoesNotExist(fs, someDir3);
+    }
+
+    /** @throws Exception If failed. */
+    public void testDeleteRecursivelyFromRoot() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path someDir3 = new Path(fsHome, "/someDir1/someDir2/someDir3");
+
+        FSDataOutputStream os = fs.create(someDir3);
+
+        os.close();
+
+        Path root = new Path(fsHome, "/");
+
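+        // The root directory itself cannot be deleted, but its contents can be removed.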
+        assertFalse(fs.delete(root, true));
+        assertTrue(fs.delete(new Path("/someDir1"), true));
+
+        assertPathDoesNotExist(fs, someDir3);
+        assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1/someDir2"));
+        assertPathDoesNotExist(fs, new Path(fsHome, "/someDir1"));
+        assertPathExists(fs, root);
+    }
+
+    /** @throws Exception If failed. */
+    @SuppressWarnings("deprecation")
+    public void testSetPermissionCheckDefaultPermission() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path file = new Path(fsHome, "/tmp/my");
+
+        FSDataOutputStream os = fs.create(file, FsPermission.getDefault(), false, 64 * 1024,
+            fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
+
+        os.close();
+
+        fs.setPermission(file, null);
+
+        assertEquals(FsPermission.getDefault(), fs.getFileStatus(file).getPermission());
+        assertEquals(FsPermission.getDefault(), fs.getFileStatus(file.getParent()).getPermission());
+    }
+
+    /** @throws Exception If failed. */
+    @SuppressWarnings("deprecation")
+    public void testSetPermissionCheckNonRecursiveness() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path file = new Path(fsHome, "/tmp/my");
+
+        FSDataOutputStream os = fs.create(file, FsPermission.getDefault(), false, 64 * 1024,
+            fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
+
+        os.close();
+
+        Path tmpDir = new Path(fsHome, "/tmp");
+
+        FsPermission perm = new FsPermission((short)123);
+
+        fs.setPermission(tmpDir, perm);
+
+        assertEquals(perm, fs.getFileStatus(tmpDir).getPermission());
+        assertEquals(FsPermission.getDefault(), fs.getFileStatus(file).getPermission());
+    }
+
+    /** @throws Exception If failed. */
+    @SuppressWarnings("OctalInteger")
+    public void testSetPermission() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path file = new Path(fsHome, "/tmp/my");
+
+        FSDataOutputStream os = fs.create(file);
+
+        os.close();
+
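+        // Step through a sample of the 0..0777 permission range (a step of 7 ends exactly at 0777).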
+        for (short i = 0; i <= 0777; i += 7) {
+            FsPermission perm = new FsPermission(i);
+
+            fs.setPermission(file, perm);
+
+            assertEquals(perm, fs.getFileStatus(file).getPermission());
+        }
+    }
+
+    /** @throws Exception If failed. */
+    public void testSetPermissionIfOutputStreamIsNotClosed() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path file = new Path(fsHome, "myFile");
+
+        FsPermission perm = new FsPermission((short)123);
+
+        FSDataOutputStream os = fs.create(file);
+
+        fs.setPermission(file, perm);
+
+        os.close();
+
+        assertEquals(perm, fs.getFileStatus(file).getPermission());
+    }
+
+    /** @throws Exception If failed. */
+    public void testSetOwnerCheckParametersPathIsNull() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path file = new Path(fsHome, "/tmp/my");
+
+        FSDataOutputStream os = fs.create(file);
+
+        os.close();
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                fs.setOwner(null, "aUser", "aGroup");
+
+                return null;
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: p");
+    }
+
+    /** @throws Exception If failed. */
+    public void testSetOwnerCheckParametersUserIsNull() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path file = new Path(fsHome, "/tmp/my");
+
+        FSDataOutputStream os = fs.create(file);
+
+        os.close();
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                fs.setOwner(file, null, "aGroup");
+
+                return null;
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: username");
+    }
+
+    /** @throws Exception If failed. */
+    public void testSetOwnerCheckParametersGroupIsNull() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path file = new Path(fsHome, "/tmp/my");
+
+        FSDataOutputStream os = fs.create(file);
+
+        os.close();
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                fs.setOwner(file, "aUser", null);
+
+                return null;
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: grpName");
+    }
+
+    /** @throws Exception If failed. */
+    public void testSetOwner() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path file = new Path(fsHome, "/tmp/my");
+
+        FSDataOutputStream os = fs.create(file);
+
+        os.close();
+
+        assertEquals(getClientFsUser(), fs.getFileStatus(file).getOwner());
+
+        fs.setOwner(file, "aUser", "aGroup");
+
+        assertEquals("aUser", fs.getFileStatus(file).getOwner());
+        assertEquals("aGroup", fs.getFileStatus(file).getGroup());
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSetTimes() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path file = new Path(fsHome, "/heartbeat");
+
+        fs.create(file).close();
+
+        FileStatus status = fs.getFileStatus(file);
+
+        assertTrue(status.getAccessTime() > 0);
+        assertTrue(status.getModificationTime() > 0);
+
+        long mtime = System.currentTimeMillis() - 5000;
+        long atime = System.currentTimeMillis() - 4000;
+
+        fs.setTimes(file, mtime, atime);
+
+        status = fs.getFileStatus(file);
+
+        assertEquals(mtime, status.getModificationTime());
+        assertEquals(atime, status.getAccessTime());
+
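+        // Passing -1 for a timestamp should leave that value unchanged.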
+        mtime -= 5000;
+
+        fs.setTimes(file, mtime, -1);
+
+        status = fs.getFileStatus(file);
+
+        assertEquals(mtime, status.getModificationTime());
+        assertEquals(atime, status.getAccessTime());
+
+        atime -= 5000;
+
+        fs.setTimes(file, -1, atime);
+
+        status = fs.getFileStatus(file);
+
+        assertEquals(mtime, status.getModificationTime());
+        assertEquals(atime, status.getAccessTime());
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSetOwnerIfOutputStreamIsNotClosed() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path file = new Path(fsHome, "myFile");
+
+        FSDataOutputStream os = fs.create(file);
+
+        fs.setOwner(file, "aUser", "aGroup");
+
+        os.close();
+
+        assertEquals("aUser", fs.getFileStatus(file).getOwner());
+        assertEquals("aGroup", fs.getFileStatus(file).getGroup());
+    }
+
+    /** @throws Exception If failed. */
+    public void testSetOwnerCheckNonRecursiveness() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path file = new Path(fsHome, "/tmp/my");
+
+        FSDataOutputStream os = fs.create(file);
+
+        os.close();
+
+        Path tmpDir = new Path(fsHome, "/tmp");
+
+        fs.setOwner(file, "fUser", "fGroup");
+        fs.setOwner(tmpDir, "dUser", "dGroup");
+
+        assertEquals("dUser", fs.getFileStatus(tmpDir).getOwner());
+        assertEquals("dGroup", fs.getFileStatus(tmpDir).getGroup());
+
+        assertEquals("fUser", fs.getFileStatus(file).getOwner());
+        assertEquals("fGroup", fs.getFileStatus(file).getGroup());
+    }
+
+    /** @throws Exception If failed. */
+    public void testOpenCheckParametersPathIsNull() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.open(null, 1024);
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
+    }
+
+    /** @throws Exception If failed. */
+    public void testOpenNoSuchPath() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path file = new Path(fsHome, "someFile");
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.open(file, 1024);
+            }
+        }, FileNotFoundException.class, null);
+    }
+
+    /** @throws Exception If failed. */
+    public void testOpenIfPathIsAlreadyOpened() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path file = new Path(fsHome, "someFile");
+
+        FSDataOutputStream os = fs.create(file);
+
+        os.close();
+
+        FSDataInputStream is1 = fs.open(file);
+        FSDataInputStream is2 = fs.open(file);
+
+        is1.close();
+        is2.close();
+    }
+
+    /** @throws Exception If failed. */
+    public void testOpen() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path file = new Path(fsHome, "someFile");
+
+        int cnt = 2 * 1024;
+
+        try (FSDataOutputStream out = fs.create(file, true, 1024)) {
+
+            for (long i = 0; i < cnt; i++)
+                out.writeLong(i);
+        }
+
+        assertEquals(getClientFsUser(), fs.getFileStatus(file).getOwner());
+
+        try (FSDataInputStream in = fs.open(file, 1024)) {
+
+            for (long i = 0; i < cnt; i++)
+                assertEquals(i, in.readLong());
+        }
+    }
+
+    /** @throws Exception If failed. */
+    public void testAppendCheckParametersPathIsNull() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.append(null);
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
+    }
+
+    /** @throws Exception If failed. */
+    public void testAppendIfPathPointsToDirectory() throws Exception {
+        final Path fsHome = new Path(primaryFsUri);
+        final Path dir = new Path(fsHome, "/tmp");
+        Path file = new Path(dir, "my");
+
+        FSDataOutputStream os = fs.create(file);
+
+        os.close();
+
+        GridTestUtils.assertThrowsInherited(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.append(dir, 1024);
+            }
+        }, IOException.class, null);
+    }
+
+    /** @throws Exception If failed. */
+    public void testAppendIfFileIsAlreadyBeingOpenedToWrite() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path file = new Path(fsHome, "someFile");
+
+        FSDataOutputStream os = fs.create(file);
+
+        os.close();
+
+        FSDataOutputStream appendOs = fs.append(file);
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.append(file);
+            }
+        }, IOException.class, null);
+
+        appendOs.close();
+    }
+
+    /** @throws Exception If failed. */
+    public void testAppend() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path file = new Path(fsHome, "someFile");
+
+        int cnt = 1024;
+
+        FSDataOutputStream out = fs.create(file, true, 1024);
+
+        for (int i = 0; i < cnt; i++)
+            out.writeLong(i);
+
+        out.close();
+
+        out = fs.append(file);
+
+        for (int i = cnt; i < cnt * 2; i++)
+            out.writeLong(i);
+
+        out.close();
+
+        FSDataInputStream in = fs.open(file, 1024);
+
+        for (int i = 0; i < cnt * 2; i++)
+            assertEquals(i, in.readLong());
+
+        in.close();
+    }
+
+    /** @throws Exception If failed. */
+    public void testRenameCheckParametersSrcPathIsNull() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path file = new Path(fsHome, "someFile");
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.rename(null, file);
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: src");
+    }
+
+    /** @throws Exception If failed. */
+    public void testRenameCheckParametersDstPathIsNull() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        final Path file = new Path(fsHome, "someFile");
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.rename(file, null);
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: dst");
+    }
+
+    /** @throws Exception If failed. */
+    public void testRenameIfSrcPathDoesNotExist() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path srcFile = new Path(fsHome, "srcFile");
+        Path dstFile = new Path(fsHome, "dstFile");
+
+        assertPathDoesNotExist(fs, srcFile);
+
+        assertFalse(fs.rename(srcFile, dstFile));
+
+        assertPathDoesNotExist(fs, dstFile);
+    }
+
+    /** @throws Exception If failed. */
+    public void testRenameIfSrcPathIsAlreadyBeingOpenedToWrite() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path srcFile = new Path(fsHome, "srcFile");
+        Path dstFile = new Path(fsHome, "dstFile");
+
+        FSDataOutputStream os = fs.create(srcFile);
+
+        os.close();
+
+        os = fs.append(srcFile);
+
+        assertTrue(fs.rename(srcFile, dstFile));
+
+        assertPathExists(fs, dstFile);
+
+        String testStr = "Test";
+
+        try {
+            os.writeBytes(testStr);
+        }
+        finally {
+            os.close();
+        }
+
+        try (FSDataInputStream is = fs.open(dstFile)) {
+            byte[] buf = new byte[testStr.getBytes().length];
+
+            is.readFully(buf);
+
+            assertEquals(testStr, new String(buf));
+        }
+    }
+
+    /** @throws Exception If failed. */
+    public void testRenameFileIfDstPathExists() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path srcFile = new Path(fsHome, "srcFile");
+        Path dstFile = new Path(fsHome, "dstFile");
+
+        FSDataOutputStream os = fs.create(srcFile);
+
+        os.close();
+
+        os = fs.create(dstFile);
+
+        os.close();
+
+        assertFalse(fs.rename(srcFile, dstFile));
+
+        assertPathExists(fs, srcFile);
+        assertPathExists(fs, dstFile);
+    }
+
+    /** @throws Exception If failed. */
+    public void testRenameFile() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path srcFile = new Path(fsHome, "/tmp/srcFile");
+        Path dstFile = new Path(fsHome, "/tmp/dstFile");
+
+        FSDataOutputStream os = fs.create(srcFile);
+
+        os.close();
+
+        assertTrue(fs.rename(srcFile, dstFile));
+
+        assertPathDoesNotExist(fs, srcFile);
+        assertPathExists(fs, dstFile);
+    }
+
+    /** @throws Exception If failed. */
+    public void testRenameIfSrcPathIsAlreadyBeingOpenedToRead() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path srcFile = new Path(fsHome, "srcFile");
+        Path dstFile = new Path(fsHome, "dstFile");
+
+        FSDataOutputStream os = fs.create(srcFile);
+
+        int cnt = 1024;
+
+        for (int i = 0; i < cnt; i++)
+            os.writeInt(i);
+
+        os.close();
+
+        FSDataInputStream is = fs.open(srcFile);
+
+        for (int i = 0; i < cnt; i++) {
+            if (i == 100)
+                // Rename file during the read process.
+                assertTrue(fs.rename(srcFile, dstFile));
+
+            assertEquals(i, is.readInt());
+        }
+
+        assertPathDoesNotExist(fs, srcFile);
+        assertPathExists(fs, dstFile);
+
+        is.close();
+    }
+
+    /** @throws Exception If failed. */
+    public void testRenameDirectoryIfDstPathExists() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path srcDir = new Path(fsHome, "/tmp/");
+        Path dstDir = new Path(fsHome, "/tmpNew/");
+
+        FSDataOutputStream os = fs.create(new Path(srcDir, "file1"));
+
+        os.close();
+
+        os = fs.create(new Path(dstDir, "file2"));
+
+        os.close();
+
+        assertTrue("Rename succeeded [srcDir=" + srcDir + ", dstDir=" + dstDir + ']', fs.rename(srcDir, dstDir));
+
+        assertPathExists(fs, dstDir);
+        assertPathExists(fs, new Path(fsHome, "/tmpNew/tmp"));
+        assertPathExists(fs, new Path(fsHome, "/tmpNew/tmp/file1"));
+    }
+
+    /** @throws Exception If failed. */
+    public void testRenameDirectory() throws Exception {
+        Path fsHome = new Path(primaryFsUri);
+        Path dir = new Path(fsHome, "/tmp/");
+        Path newDir = new Path(fsHome, "/tmpNew/");
+
+        FSDataOutputStream os = fs.create(new Path(dir, "myFile"));
+
+        os.close();
+
+        assertTrue("Rename failed [dir=" + dir + ", newDir=" + newDir + ']', fs.rename(dir, newDir));
+
+        assertPathDoesNotExist(fs, dir);
+        assertPathExists(fs, newDir);
+    }
+
+    /** @throws Exception If failed. */
+    public void testListStatusIfPathIsNull() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.listStatus((Path)null);
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
+    }
+
+    /** @throws Exception If failed. */
+    public void testListStatusIfPathDoesNotExist() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.listStatus(new Path("/tmp/some/dir"));
+            }
+        }, FileNotFoundException.class, null);
+    }
+
+    /**
+     * Test directory listing.
+     *
+     * @throws Exception If failed.
+     */
+    public void testListStatus() throws Exception {
+        Path igfsHome = new Path(PRIMARY_URI);
+
+        // Test listing of an empty directory.
+        Path dir = new Path(igfsHome, "dir");
+
+        assert fs.mkdirs(dir);
+
+        FileStatus[] list = fs.listStatus(dir);
+
+        assert list.length == 0;
+
+        // Test listing of a not empty directory.
+        Path subDir = new Path(dir, "subDir");
+
+        assert fs.mkdirs(subDir);
+
+        Path file = new Path(dir, "file");
+
+        FSDataOutputStream fos = fs.create(file);
+
+        fos.close();
+
+        list = fs.listStatus(dir);
+
+        assert list.length == 2;
+
+        String listRes1 = list[0].getPath().getName();
+        String listRes2 = list[1].getPath().getName();
+
+        assert "subDir".equals(listRes1) && "file".equals(listRes2) || "subDir".equals(listRes2) &&
+            "file".equals(listRes1);
+
+        // Test listing of a file.
+        list = fs.listStatus(file);
+
+        assert list.length == 1;
+
+        assert "file".equals(list[0].getPath().getName());
+    }
+
+    /** @throws Exception If failed. */
+    public void testSetWorkingDirectoryIfPathIsNull() throws Exception {
+        fs.setWorkingDirectory(null);
+
+        Path file = new Path("file");
+
+        FSDataOutputStream os = fs.create(file);
+        os.close();
+
+        String path = fs.getFileStatus(file).getPath().toString();
+
+        assertTrue(path.endsWith("/user/" + getClientFsUser() + "/file"));
+    }
+
+    /** @throws Exception If failed. */
+    public void testSetWorkingDirectoryIfPathDoesNotExist() throws Exception {
+        // Should not throw any exceptions.
+        fs.setWorkingDirectory(new Path("/someDir"));
+    }
+
+    /** @throws Exception If failed. */
+    public void testSetWorkingDirectory() throws Exception {
+        Path dir = new Path("/tmp/nested/dir");
+        Path file = new Path("file");
+
+        fs.mkdirs(dir);
+
+        fs.setWorkingDirectory(dir);
+
+        FSDataOutputStream os = fs.create(file);
+        os.close();
+
+        String filePath = fs.getFileStatus(new Path(dir, file)).getPath().toString();
+
+        assertTrue(filePath.contains("/tmp/nested/dir/file"));
+    }
+
+    /** @throws Exception If failed. */
+    public void testGetWorkingDirectoryIfDefault() throws Exception {
+        String path = fs.getWorkingDirectory().toString();
+
+        assertTrue(path.endsWith("/user/" + getClientFsUser()));
+    }
+
+    /** @throws Exception If failed. */
+    public void testGetWorkingDirectory() throws Exception {
+        Path dir = new Path("/tmp/some/dir");
+
+        fs.mkdirs(dir);
+
+        fs.setWorkingDirectory(dir);
+
+        String path = fs.getWorkingDirectory().toString();
+
+        assertTrue(path.endsWith("/tmp/some/dir"));
+    }
+
+    /** @throws Exception If failed. */
+    public void testMkdirsIfPathIsNull() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.mkdirs(null);
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
+    }
+
+    /** @throws Exception If failed. */
+    public void testMkdirsIfPermissionIsNull() throws Exception {
+        Path dir = new Path("/tmp");
+
+        assertTrue(fs.mkdirs(dir, null));
+
+        assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir).getPermission());
+    }
+
+    /** @throws Exception If failed. */
+    @SuppressWarnings("OctalInteger")
+    public void testMkdirs() throws Exception {
+        Path fsHome = new Path(PRIMARY_URI);
+        final Path dir = new Path(fsHome, "/tmp/staging");
+        final Path nestedDir = new Path(dir, "nested");
+
+        final FsPermission dirPerm = FsPermission.createImmutable((short)0700);
+        final FsPermission nestedDirPerm = FsPermission.createImmutable((short)111);
+
+        assertTrue(fs.mkdirs(dir, dirPerm));
+        assertTrue(fs.mkdirs(nestedDir, nestedDirPerm));
+
+        assertEquals(dirPerm, fs.getFileStatus(dir).getPermission());
+        assertEquals(nestedDirPerm, fs.getFileStatus(nestedDir).getPermission());
+
+        assertEquals(getClientFsUser(), fs.getFileStatus(dir).getOwner());
+        assertEquals(getClientFsUser(), fs.getFileStatus(nestedDir).getOwner());
+    }
+
+    /** @throws Exception If failed. */
+    public void testGetFileStatusIfPathIsNull() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.getFileStatus(null);
+            }
+        }, NullPointerException.class, "Ouch! Argument cannot be null: f");
+    }
+
+    /** @throws Exception If failed. */
+    public void testGetFileStatusIfPathDoesNotExist() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.getFileStatus(new Path("someDir"));
+            }
+        }, FileNotFoundException.class, "File not found: someDir");
+    }
+
+    /** @throws Exception If failed. */
+    public void testGetFileBlockLocationsIfFileStatusIsNull() throws Exception {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                // Argument is checked by Hadoop.
+                return fs.getFileBlockLocations((Path)null, 1, 2);
+            }
+        }, NullPointerException.class, null);
+    }
+
+    /** @throws Exception If failed. */
+    public void testGetFileBlockLocationsIfFileStatusReferenceNotExistingPath() throws Exception {
+        Path path = new Path("someFile");
+
+        fs.create(path).close();
+
+        final FileStatus status = fs.getFileStatus(path);
+
+        fs.delete(path, true);
+
+        BlockLocation[] locations = fs.getFileBlockLocations(status, 1, 2);
+
+        assertEquals(0, locations.length);
+    }
+
+    /** @throws Exception If failed. */
+    public void testGetFileBlockLocations() throws Exception {
+        Path igfsHome = new Path(PRIMARY_URI);
+
+        Path file = new Path(igfsHome, "someFile");
+
+        try (OutputStream out = new BufferedOutputStream(fs.create(file, true, 1024 * 1024))) {
+            byte[] data = new byte[128 * 1024];
+
+            for (int i = 0; i < 100; i++)
+                out.write(data);
+
+            out.flush();
+        }
+
+        try (FSDataInputStream in = fs.open(file, 1024 * 1024)) {
+            byte[] data = new byte[128 * 1024];
+
+            int read;
+
+            do {
+                read = in.read(data);
+            }
+            while (read > 0);
+        }
+
+        FileStatus status = fs.getFileStatus(file);
+
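+        // The test expects one block location per group of 128 * 512KB = 64MB of data.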
+        int grpLen = 128 * 512 * 1024;
+
+        int grpCnt = (int)((status.getLen() + grpLen - 1) / grpLen);
+
+        BlockLocation[] locations = fs.getFileBlockLocations(status, 0, status.getLen());
+
+        assertEquals(grpCnt, locations.length);
+    }
+
+    /** @throws Exception If failed. */
+    @SuppressWarnings("deprecation")
+    public void testGetDefaultBlockSize() throws Exception {
+        assertEquals(1L << 26, fs.getDefaultBlockSize());
+    }
+
+    /** @throws Exception If failed. */
+    public void testZeroReplicationFactor() throws Exception {
+        // This test only makes sense in PRIMARY mode.
+        if (mode == PRIMARY) {
+            Path igfsHome = new Path(PRIMARY_URI);
+
+            Path file = new Path(igfsHome, "someFile");
+
+            try (FSDataOutputStream out = fs.create(file, (short)0)) {
+                out.write(new byte[1024 * 1024]);
+            }
+
+            IgniteFileSystem igfs = grid(0).fileSystem("igfs");
+
+            IgfsPath filePath = new IgfsPath("/someFile");
+
+            IgfsFile fileInfo = igfs.info(filePath);
+
+            awaitPartitionMapExchange();
+
+            Collection<IgfsBlockLocation> locations = igfs.affinity(filePath, 0, fileInfo.length());
+
+            assertEquals(1, locations.size());
+
+            IgfsBlockLocation location = F.first(locations);
+
+            assertEquals(1, location.nodeIds().size());
+        }
+    }
+
+    /**
+     * Ensure that when running in multithreaded mode only one create() operation succeeds.
+     *
+     * @throws Exception If failed.
+     */
+    public void testMultithreadedCreate() throws Exception {
+        Path dir = new Path(new Path(PRIMARY_URI), "/dir");
+
+        assert fs.mkdirs(dir);
+
+        final Path file = new Path(dir, "file");
+
+        fs.create(file).close();
+
+        final AtomicInteger cnt = new AtomicInteger();
+
+        final Collection<Integer> errs = new GridConcurrentHashSet<>(THREAD_CNT, 1.0f, THREAD_CNT);
+
+        final AtomicBoolean err = new AtomicBoolean();
+
+        multithreaded(new Runnable() {
+            @Override public void run() {
+                int idx = cnt.getAndIncrement();
+
+                byte[] data = new byte[256];
+
+                Arrays.fill(data, (byte)idx);
+
+                FSDataOutputStream os = null;
+
+                try {
+                    os = fs.create(file, true);
+                }
+                catch (IOException ignore) {
+                    errs.add(idx);
+                }
+
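+                // Wait until every thread has attempted create() before writing.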
+                U.awaitQuiet(barrier);
+
+                try {
+                    if (os != null)
+                        os.write(data);
+                }
+                catch (IOException ignore) {
+                    err.set(true);
+                }
+                finally {
+                    U.closeQuiet(os);
+                }
+            }
+        }, THREAD_CNT);
+
+        assert !err.get();
+
+        // Only one thread could obtain write lock on the file.
+        assert errs.size() == THREAD_CNT - 1;
+
+        int idx = -1;
+
+        for (int i = 0; i < THREAD_CNT; i++) {
+            if (!errs.remove(i)) {
+                idx = i;
+
+                break;
+            }
+        }
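+        // 'idx' is now the index of the single thread whose create() succeeded.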
+
+        byte[] expData = new byte[256];
+
+        Arrays.fill(expData, (byte)idx);
+
+        FSDataInputStream is = fs.open(file);
+
+        byte[] data = new byte[256];
+
+        is.read(data);
+
+        is.close();
+
+        assert Arrays.equals(expData, data) : "Expected=" + Arrays.toString(expData) + ", actual=" +
+            Arrays.toString(data);
+    }
+
+    /**
+     * Ensure that when running in multithreaded mode only one append() operation succeeds.
+     *
+     * @throws Exception If failed.
+     */
+    public void testMultithreadedAppend() throws Exception {
+        Path dir = new Path(new Path(PRIMARY_URI), "/dir");
+
+        assert fs.mkdirs(dir);
+
+        final Path file = new Path(dir, "file");
+
+        fs.create(file).close();
+
+        final AtomicInteger cnt = new AtomicInteger();
+
+        final Collection<Integer> errs = new GridConcurrentHashSet<>(THREAD_CNT, 1.0f, THREAD_CNT);
+
+        final AtomicBoolean err = new AtomicBoolean();
+
+        multithreaded(new Runnable() {
+            @Override public void run() {
+                int idx = cnt.getAndIncrement();
+
+                byte[] data = new byte[256];
+
+                Arrays.fill(data, (byte)idx);
+
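+                // First barrier: line all threads up before the concurrent append() attempts.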
+                U.awaitQuiet(barrier);
+
+                FSDataOutputStream os = null;
+
+                try {
+                    os = fs.append(file);
+                }
+                catch (IOException ignore) {
+                    errs.add(idx);
+                }
+
+                U.awaitQuiet(barrier);
+
+                try {
+                    if (os != null)
+                        os.write(data);
+                }
+                catch (IOException ignore) {
+                    err.set(true);
+                }
+                finally {
+                    U.closeQuiet(os);
+                }
+            }
+        }, THREAD_CNT);
+
+        assert !err.get();
+
+        // Only one thread could obtain write lock on the file.
+        assert errs.size() == THREAD_CNT - 1;
+
+        int idx = -1;
+
+        for (int i = 0; i < THREAD_CNT; i++) {
+            if (!errs.remove(i)) {
+                idx = i;
+
+                break;
+            }
+        }
+
+        byte[] expData = new byte[256];
+
+        Arrays.fill(expData, (byte)idx);
+
+        FSDataInputStream is = fs.open(file);
+
+        byte[] data = new byte[256];
+
+        is.read(data);
+
+        is.close();
+
+        assert Arrays.equals(expData, data);
+    }
+
+    /**
+     * Test concurrent reads within the file.
+     *
+     * @throws Exception If failed.
+     */
+    public void testMultithreadedOpen() throws Exception {
+        final byte[] dataChunk = new byte[256];
+
+        for (int i = 0; i < dataChunk.length; i++)
+            dataChunk[i] = (byte)i;
+
+        Path dir = new Path(new Path(PRIMARY_URI), "/dir");
+
+        assert fs.mkdirs(dir);
+
+        final Path file = new Path(dir, "file");
+
+        FSDataOutputStream os = fs.create(file);
+
+        // Write 256 * 2048 = 512Kb of data.
+        for (int i = 0; i < 2048; i++)
+            os.write(dataChunk);
+
+        os.close();
+
+        final AtomicBoolean err = new AtomicBoolean();
+
+        multithreaded(new Runnable() {
+            @Override public void run() {
+                FSDataInputStream is = null;
+
+                try {
+                    int pos = ThreadLocalRandom8.current().nextInt(2048);
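+                    // Each thread starts reading at a random 256-byte chunk boundary.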
+
+                    try {
+                        is = fs.open(file);
+                    }
+                    finally {
+                        U.awaitQuiet(barrier);
+                    }
+
+                    is.seek(256 * pos);
+
+                    byte[] buf = new byte[256];
+
+                    for (int i = pos; i < 2048; i++) {
+                        // First perform normal read.
+                        int read = is.read(buf);
+
+                        assert read == 256;
+
+                        assert Arrays.equals(dataChunk, buf);
+                    }
+
+                    int res = is.read(buf);
+
+                    assert res == -1;
+                }
+                catch (IOException ignore) {
+                    err.set(true);
+                }
+                finally {
+                    U.closeQuiet(is);
+                }
+            }
+        }, THREAD_CNT);
+
+        assert !err.get();
+    }
+
+    /**
+     * Test concurrent creation of multiple directories.
+     *
+     * @throws Exception If failed.
+     */
+    public void testMultithreadedMkdirs() throws Exception {
+        final Path dir = new Path(new Path(PRIMARY_URI), "/dir");
+
+        assert fs.mkdirs(dir);
+
+        final int depth = 3;
+        final int entryCnt = 5;
+
+        final AtomicReference<IOException> err = new AtomicReference();
+
+        multithreaded(new Runnable() {
+            @Override public void run() {
+                Deque<IgniteBiTuple<Integer, Path>> queue = new ArrayDeque<>();
+
+                queue.add(F.t(0, dir));
+
+                U.awaitQuiet(barrier);
+
+                while (!queue.isEmpty()) {
+                    IgniteBiTuple<Integer, Path> t = queue.pollFirst();
+
+                    int curDepth = t.getKey();
+                    Path curPath = t.getValue();
+
+                    if (curDepth <= depth) {
+                        int newDepth = curDepth + 1;
+
+                        // Create directories.
+                        for (int i = 0; i < entryCnt; i++) {
+                            Path subDir = new Path(curPath, "dir-" + newDepth + "-" + i);
+
+                            try {
+                                if (fs.mkdirs(subDir))
+                                    queue.addLast(F.t(newDepth, subDir));
+                            }
+                            catch (IOException e) {
+                                err.compareAndSet(null, e);
+                            }
+                        }
+                    }
+                }
+            }
+        }, THREAD_CNT);
+
+        // Ensure there were no errors.
+        assert err.get() == null : err.get();
+
+        // Ensure correct folders structure.
+        Deque<IgniteBiTuple<Integer, Path>> queue = new ArrayDeque<>();
+
+        queue.add(F.t(0, dir));
+
+        while (!queue.isEmpty()) {
+            IgniteBiTuple<Integer, Path> t = queue.pollFirst();
+
+            int curDepth = t.getKey();
+            Path curPath = t.getValue();
+
+            if (curDepth <= depth) {
+                int newDepth = curDepth + 1;
+
+                // Verify the directories created above exist.
+                for (int i = 0; i < entryCnt; i++) {
+                    Path subDir = new Path(curPath, "dir-" + newDepth + "-" + i);
+
+                    assert fs.exists(subDir) : "Expected directory doesn't exist: " + subDir;
+
+                    queue.add(F.t(newDepth, subDir));
+                }
+            }
+        }
+    }
+
+    /**
+     * Test concurrent deletion of the same directory with advanced structure.
+     *
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("TooBroadScope")
+    public void testMultithreadedDelete() throws Exception {
+        final Path dir = new Path(new Path(PRIMARY_URI), "/dir");
+
+        assert fs.mkdirs(dir);
+
+        int depth = 3;
+        int entryCnt = 5;
+
+        Deque<IgniteBiTuple<Integer, Path>> queue = new ArrayDeque<>();
+
+        queue.add(F.t(0, dir));
+
+        while (!queue.isEmpty()) {
+            IgniteBiTuple<Integer, Path> t = queue.pollFirst();
+
+            int curDepth = t.getKey();
+            Path curPath = t.getValue();
+
+            if (curDepth < depth) {
+                int newDepth = curDepth + 1;
+
+                // Create directories.
+                for (int i = 0; i < entryCnt; i++) {
+                    Path subDir = new Path(curPath, "dir-" + newDepth + "-" + i);
+
+                    fs.mkdirs(subDir);
+
+                    queue.addLast(F.t(newDepth, subDir));
+                }
+            }
+            else {
+                // Create files.
+                for (int i = 0; i < entryCnt; i++) {
+                    Path file = new Path(curPath, "file " + i);
+
+                    fs.create(file).close();
+                }
+            }
+        }
+
+        final AtomicBoolean err = new AtomicBoolean();
+
+        multithreaded(new Runnable() {
+            @Override public void run() {
+                try {
+                    U.awaitQuiet(barrier);
+
+                    fs.delete(dir, true);
+                }
+                catch (IOException ignore) {
+                    err.set(true);
+                }
+            }
+        }, THREAD_CNT);
+
+        // Ensure there were no errors.
+        assert !err.get();
+
+        // Ensure the directory was actually deleted.
+
+        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
+            @Override public boolean apply() {
+                try {
+                    return !fs.exists(dir);
+                }
+                catch (IOException e) {
+                    throw new AssertionError(e);
+                }
+            }
+        }, 5000L);
+    }
+
+    /** @throws Exception If failed. */
+    public void testConsistency() throws Exception {
+        // Default buffer values.
+        checkConsistency(-1, 1, -1, -1, 1, -1);
+        checkConsistency(-1, 10, -1, -1, 10, -1);
+        checkConsistency(-1, 100, -1, -1, 100, -1);
+        checkConsistency(-1, 1000, -1, -1, 1000, -1);
+        checkConsistency(-1, 10000, -1, -1, 10000, -1);
+        checkConsistency(-1, 100000, -1, -1, 100000, -1);
+
+        checkConsistency(65 * 1024 + 13, 100000, -1, -1, 100000, -1);
+
+        checkConsistency(-1, 100000, 2 * 4 * 1024 + 17, -1, 100000, -1);
+
+        checkConsistency(-1, 100000, -1, 65 * 1024 + 13, 100000, -1);
+
+        checkConsistency(-1, 100000, -1, -1, 100000, 2 * 4 * 1024 + 17);
+
+        checkConsistency(65 * 1024 + 13, 100000, 2 * 4 * 1024 + 13, 65 * 1024 + 149, 100000, 2 * 4 * 1024 + 157);
+    }
+
+    /**
+     * Verifies that client reconnects after connection to the server has been lost.
+     *
+     * @throws Exception If error occurs.
+     */
+    public void testClientReconnect() throws Exception {
+        Path filePath = new Path(PRIMARY_URI, "file1");
+
+        final FSDataOutputStream s = fs.create(filePath); // Open the stream before stopping IGFS.
+
+        try {
+            G.stopAll(true); // Stop the server.
+
+            startNodes(); // Start server again.
+
+            // Check that client is again operational.
+            assertTrue(fs.mkdirs(new Path(PRIMARY_URI, "dir1/dir2")));
+
+            // However, the streams, opened before disconnect, should not be valid.
+            GridTestUtils.assertThrows(log, new Callable<Object>() {
+                @Nullable @Override public Object call() throws Exception {
+                    s.write("test".getBytes());
+
+                    s.flush(); // Flush data to the broken output stream.
+
+                    return null;
+                }
+            }, IOException.class, null);
+
+            assertFalse(fs.exists(filePath));
+        }
+        finally {
+            U.closeQuiet(s); // Safety.
+        }
+    }
+
+    /**
+     * Verifies that client reconnects after connection to the server has been lost (multithreaded mode).
+     *
+     * @throws Exception If error occurs.
+     */
+    public void testClientReconnectMultithreaded() throws Exception {
+        final ConcurrentLinkedQueue<FileSystem> q = new ConcurrentLinkedQueue<>();
+
+        Configuration cfg = new Configuration();
+
+        for (Map.Entry<String, String> entry : primaryFsCfg)
+            cfg.set(entry.getKey(), entry.getValue());
+
+        cfg.setBoolean("fs.igfs.impl.disable.cache", true);
+
+        final int nClients = 1;
+
+        // Initialize clients.
+        for (int i = 0; i < nClients; i++)
+            q.add(FileSystem.get(primaryFsUri, cfg));
+
+        G.stopAll(true); // Stop the server.
+
+        startNodes(); // Start server again.
+
+        GridTestUtils.runMultiThreaded(new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                FileSystem fs = q.poll();
+
+                try {
+                    // Check that client is again operational.
+                    assertTrue(fs.mkdirs(new Path("/" + Thread.currentThread().getName())));
+
+                    return true;
+                }
+                finally {
+                    U.closeQuiet(fs);
+                }
+            }
+        }, nClients, "test-client");
+    }
+
+    /**
+     * Checks consistency of create --> open --> append --> open operations with different buffer sizes.
+     *
+     * @param createBufSize Buffer size used for file creation.
+     * @param writeCntsInCreate Count of times to write in file creation.
+     * @param openAfterCreateBufSize Buffer size used for file opening after creation.
+     * @param appendBufSize Buffer size used for file appending.
+     * @param writeCntsInAppend Count of times to write in file appending.
+     * @param openAfterAppendBufSize Buffer size used for file opening after appending.
+     * @throws Exception If failed.
+     */
+    private void checkConsistency(int createBufSize, int writeCntsInCreate, int openAfterCreateBufSize,
+        int appendBufSize, int writeCntsInAppend, int openAfterAppendBufSize) throws Exception {
+        final Path igfsHome = new Path(PRIMARY_URI);
+
+        Path file = new Path(igfsHome, "/someDir/someInnerDir/someFile");
+
+        FSDataOutputStream os = fs.create(file, true, createBufSize);
+
+        for (int i = 0; i < writeCntsInCreate; i++)
+            os.writeInt(i);
+
+        os.close();
+
+        FSDataInputStream is = fs.open(file, openAfterCreateBufSize);
+
+        for (int i = 0; i < writeCntsInCreate; i++)
+            assertEquals(i, is.readInt());
+
+        is.close();
+
+        os = fs.append(file, appendBufSize);
+
+        for (int i = writeCntsInCreate; i < writeCntsInCreate + writeCntsInAppend; i++)
+            os.writeInt(i);
+
+        os.close();
+
+        is = fs.open(file, openAfterAppendBufSize);
+
+        for (int i = 0; i < writeCntsInCreate + writeCntsInAppend; i++)
+            assertEquals(i, is.readInt());
+
+        is.close();
+    }
+
+    /**
+     * Gets an instance of the Hadoop local file system.
+     *
+     * @param home File system home.
+     * @return File system.
+     * @throws IOException If failed.
+     */
+    private FileSystem local(Path home) throws IOException {
+        Configuration cfg = new Configuration();
+
+        cfg.addResource(U.resolveIgniteUrl(HADOOP_FS_CFG));
+
+        return FileSystem.get(home.toUri(), cfg);
+    }
+
+    /**
+     * Copy files from one FS to another.
+     *
+     * @param msg Info message to display after copying finishes.
+     * @param srcFs Source file system.
+     * @param src Source path to copy from.
+     * @param destFs Destination file system.
+     * @param dest Destination path to copy to.
+     * @throws IOException If failed.
+     */
+    private void copy(String msg, FileSystem srcFs, Path src, FileSystem destFs, Path dest) throws IOException {
+        assert destFs.delete(dest, true) || !destFs.exists(dest) : "Failed to remove: " + dest;
+
+        destFs.mkdirs(dest);
+
+        Configuration conf = new Configuration(true);
+
+        long time = System.currentTimeMillis();
+
+        FileUtil.copy(srcFs, src, destFs, dest, false, true, conf);
+
+        time = System.currentTimeMillis() - time;
+
+        info("Copying finished, " + msg + " [time=" + time + "ms, src=" + src + ", dest=" + dest + ']');
+    }
+
+    /**
+     * Compare content of two folders.
+     *
+     * @param cfg Paths configuration to compare.
+     * @throws IOException If failed.
+     */
+    @SuppressWarnings("deprecation")
+    private void compareContent(Config cfg) throws IOException {
+        Deque<Config> queue = new LinkedList<>();
+
+        queue.add(cfg);
+
+        for (Config c = queue.poll(); c != null; c = queue.poll()) {
+            boolean exists;
+
+            assertEquals("Check existence [src=" + c.src + ", dest=" + c.dest + ']',
+                exists = c.srcFs.exists(c.src), c.destFs.exists(c.dest));
+
+            assertEquals("Check types (files?) [src=" + c.src + ", dest=" + c.dest + ']',
+                c.srcFs.isFile(c.src), c.destFs.isFile(c.dest));
+
+            if (exists) {
+                ContentSummary srcSummary = c.srcFs.getContentSummary(c.src);
+                ContentSummary dstSummary = c.destFs.getContentSummary(c.dest);
+
+                assertEquals("Directories number comparison failed",
+                    srcSummary.getDirectoryCount(), dstSummary.getDirectoryCount());
+
+                assertEquals("Files number comparison failed",
+                    srcSummary.getFileCount(), dstSummary.getFileCount());
+
+                assertEquals("Space consumed comparison failed",
+                    srcSummary.getSpaceConsumed(), dstSummary.getSpaceConsumed());
+
+                assertEquals("Length comparison failed",
+                    srcSummary.getLength(), dstSummary.getLength());
+
+                // Intentionally skipping quotas checks as they can vary.
+            }
+            else {
+                assertContentSummaryFails(c.srcFs, c.src);
+                assertContentSummaryFails(c.destFs, c.dest);
+            }
+
+            if (!exists)
+                continue;
+
+            FileStatus[] srcSt = c.srcFs.listStatus(c.src);
+            FileStatus[] destSt = c.destFs.listStatus(c.dest);
+
+            assert srcSt != null && destSt != null : "Both listings must be non-null" +
+                " [srcSt=" + Arrays.toString(srcSt) + ", destSt=" + Arrays.toString(destSt) + ']';
+
+            assertEquals("Check listing [src=" + c.src + ", dest=" + c.dest + ']', srcSt.length, destSt.length);
+
+            // Listing of the file returns the only element with this file.
+            if (srcSt.length == 1 && c.src.equals(srcSt[0].getPath())) {
+                assertEquals(c.dest, destSt[0].getPath());
+
+                assertTrue("Expects file [src=" + c.src + ", srcSt[0]=" + srcSt[0] + ']', !srcSt[0].isDir());
+                assertTrue("Expects file [dest=" + c.dest + ", destSt[0]=" + destSt[0] + ']', !destSt[0].isDir());
+
+                FSDataInputStream srcIn = null;
+                FSDataInputStream destIn = null;
+
+                try {
+                    srcIn = c.srcFs.open(c.src);
+                    destIn = c.destFs.open(c.dest);
+
+                    GridTestIoUtils.assertEqualStreams(srcIn, destIn, srcSt[0].getLen());
+                }
+                finally {
+                    U.closeQuiet(srcIn);
+                    U.closeQuiet(destIn);
+                }
+
+                continue; // Skip the following directories validations.
+            }
+
+            // Sort both arrays.
+            Arrays.sort(srcSt, STATUS_COMPARATOR);
+            Arrays.sort(destSt, STATUS_COMPARATOR);
+
+            for (int i = 0; i < srcSt.length; i++)
+                // Dig in deep to the last leaf, instead of collecting full tree in memory.
+                queue.addFirst(new Config(c.srcFs, srcSt[i].getPath(), c.destFs, destSt[i].getPath()));
+
+            // Add non-existent file to check in the current folder.
+            String rndFile = "Non-existent file #" + UUID.randomUUID().toString();
+
+            queue.addFirst(new Config(c.srcFs, new Path(c.src, rndFile), c.destFs, new Path(c.dest, rndFile)));
+        }
+    }
+
+    /**
+     * Test expected failures for 'close' operation.
+     *
+     * @param fs File system to test.
+     * @param msg Expected exception message.
+     */
+    public void assertCloseFails(final FileSystem fs, String msg) {
+        GridTestUtils.assertThrows(log, new Callable() {
+            @Override public Object call() throws Exception {
+                fs.close();
+
+                return null;
+            }
+        }, IOException.class, msg);
+    }
+
+    /**
+     * Test expected failures for 'get content summary' operation.
+     *
+     * @param fs File system to test.
+     * @param path Path to evaluate content summary for.
+     */
+    private void assertContentSummaryFails(final FileSystem fs, final Path path) {
+        GridTestUtils.assertThrows(log, new Callable<ContentSummary>() {
+            @Override public ContentSummary call() throws Exception {
+                return fs.getContentSummary(path);
+            }
+        }, FileNotFoundException.class, null);
+    }
+
+    /**
+     * Assert that a given path exists in a given FileSystem.
+     *
+     * @param fs FileSystem to check.
+     * @param p Path to check.
+     * @throws IOException if the path does not exist.
+     */
+    private void assertPathExists(FileSystem fs, Path p) throws IOException {
+        FileStatus fileStatus = fs.getFileStatus(p);
+
+        assertEquals(p, fileStatus.getPath());
+        assertNotSame(0, fileStatus.getModificationTime());
+    }
+
+    /**
+     * Check that a given path does not exist in a given FileSystem.
+     *
+     * @param fs FileSystem to check.
+     * @param path Path to check.
+     */
+    private void assertPathDoesNotExist(final FileSystem fs, final Path path) {
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                return fs.getFileStatus(path);
+            }
+        }, FileNotFoundException.class, null);
+    }
+
+    /** Helper class to encapsulate source and destination folders. */
+    @SuppressWarnings({"PublicInnerClass", "PublicField"})
+    public static final class Config {
+        /** Source file system. */
+        public final FileSystem srcFs;
+
+        /** Source path to work with. */
+        public final Path src;
+
+        /** Destination file system. */
+        public final FileSystem destFs;
+
+        /** Destination path to work with. */
+        public final Path dest;
+
+        /**
+         * Copying task configuration.
+         *
+         * @param srcFs Source file system.
+         * @param src Source path.
+         * @param destFs Destination file system.
+         * @param dest Destination path.
+         */
+        public Config(FileSystem srcFs, Path src, FileSystem destFs, Path dest) {
+            this.srcFs = srcFs;
+            this.src = src;
+            this.destFs = destFs;
+            this.dest = dest;
+        }
+    }
+
+    /**
+     * Convert path for exception message testing purposes.
+     *
+     * @param path Path.
+     * @return Converted path.
+     * @throws Exception If failed.
+     */
+    private Path convertPath(Path path) throws Exception {
+        if (mode != PROXY)
+            return path;
+        else {
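+            // In PROXY mode the expected path is re-addressed to the secondary file system URI.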
+            URI secondaryUri = new URI(SECONDARY_URI);
+
+            URI pathUri = path.toUri();
+
+            return new Path(new URI(pathUri.getScheme() != null ? secondaryUri.getScheme() : null,
+                pathUri.getAuthority() != null ? secondaryUri.getAuthority() : null, pathUri.getPath(), null, null));
+        }
+    }
+
+    /**
+     * Create configuration for test.
+     *
+     * @param authority Authority.
+     * @param skipEmbed Whether to skip embedded mode.
+     * @param skipLocShmem Whether to skip local shmem mode.
+     * @return Configuration.
+     */
+    private static Configuration configuration(String authority, boolean skipEmbed, boolean skipLocShmem) {
+        Configuration cfg = new Configuration();
+
+        cfg.set("fs.defaultFS", "igfs://" + authority + "/");
+        cfg.set("fs.igfs.impl", IgniteHadoopFileSystem.class.getName());
+        cfg.set("fs.AbstractFileSystem.igfs.impl",
+            org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem.class.getName());
+
+        cfg.setBoolean("fs.igfs.impl.disable.cache", true);
+
+        if (skipEmbed)
+            cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_EMBED, authority), true);
+
+        if (skipLocShmem)
+            cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM, authority), true);
+
+        return cfg;
+    }
+}
\ No newline at end of file


[09/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Job.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Job.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Job.java
deleted file mode 100644
index 595474c..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Job.java
+++ /dev/null
@@ -1,445 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v2;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInput;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.Method;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Map;
-import java.util.Queue;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentLinkedDeque;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.ConcurrentMap;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.JobContextImpl;
-import org.apache.hadoop.mapred.JobID;
-import org.apache.hadoop.mapreduce.JobSubmissionFiles;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.split.JobSplit;
-import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteLogger;
-import org.apache.ignite.internal.processors.hadoop.HadoopClassLoader;
-import org.apache.ignite.internal.processors.hadoop.HadoopDefaultJobInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopFileBlock;
-import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
-import org.apache.ignite.internal.processors.hadoop.HadoopJob;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskType;
-import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
-import org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemsUtils;
-import org.apache.ignite.internal.processors.hadoop.fs.HadoopLazyConcurrentMap;
-import org.apache.ignite.internal.processors.hadoop.v1.HadoopV1Splitter;
-import org.apache.ignite.internal.util.future.GridFutureAdapter;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.T2;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.jetbrains.annotations.Nullable;
-import org.jsr166.ConcurrentHashMap8;
-
-import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.jobLocalDir;
-import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.taskLocalDir;
-import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.transformException;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemCacheUtils.FsCacheKey;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemCacheUtils.createHadoopLazyConcurrentMap;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemCacheUtils.fileSystemForMrUserWithCaching;
-
-/**
- * Hadoop job implementation for v2 API.
- */
-public class HadoopV2Job implements HadoopJob {
-    /** */
-    private final JobConf jobConf;
-
-    /** */
-    private final JobContextImpl jobCtx;
-
-    /** Hadoop job ID. */
-    private final HadoopJobId jobId;
-
-    /** Job info. */
-    protected final HadoopJobInfo jobInfo;
-
-    /** Native library names. */
-    private final String[] libNames;
-
-    /** */
-    private final JobID hadoopJobID;
-
-    /** */
-    private final HadoopV2JobResourceManager rsrcMgr;
-
-    /** */
-    private final ConcurrentMap<T2<HadoopTaskType, Integer>, GridFutureAdapter<HadoopTaskContext>> ctxs =
-        new ConcurrentHashMap8<>();
-
-    /** Pool of task context classes and thus of class loading environments. */
-    private final Queue<Class<? extends HadoopTaskContext>> taskCtxClsPool = new ConcurrentLinkedQueue<>();
-
-    /** All created task context classes. */
-    private final Queue<Class<? extends HadoopTaskContext>> fullCtxClsQueue = new ConcurrentLinkedDeque<>();
-
-    /** File system cache map. */
-    private final HadoopLazyConcurrentMap<FsCacheKey, FileSystem> fsMap = createHadoopLazyConcurrentMap();
-
-    /** Local node ID. */
-    private volatile UUID locNodeId;
-
-    /** Serialized JobConf. */
-    private volatile byte[] jobConfData;
-
-    /**
-     * Constructor.
-     *
-     * @param jobId Job ID.
-     * @param jobInfo Job info.
-     * @param log Logger.
-     * @param libNames Optional additional native library names.
-     */
-    public HadoopV2Job(HadoopJobId jobId, final HadoopDefaultJobInfo jobInfo, IgniteLogger log,
-        @Nullable String[] libNames) {
-        assert jobId != null;
-        assert jobInfo != null;
-
-        this.jobId = jobId;
-        this.jobInfo = jobInfo;
-        this.libNames = libNames;
-
-        ClassLoader oldLdr = HadoopUtils.setContextClassLoader(getClass().getClassLoader());
-
-        try {
-            hadoopJobID = new JobID(jobId.globalId().toString(), jobId.localId());
-
-            jobConf = new JobConf();
-
-            HadoopFileSystemsUtils.setupFileSystems(jobConf);
-
-            for (Map.Entry<String,String> e : jobInfo.properties().entrySet())
-                jobConf.set(e.getKey(), e.getValue());
-
-            jobCtx = new JobContextImpl(jobConf, hadoopJobID);
-
-            rsrcMgr = new HadoopV2JobResourceManager(jobId, jobCtx, log, this);
-        }
-        finally {
-            HadoopUtils.setContextClassLoader(oldLdr);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopJobId id() {
-        return jobId;
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopJobInfo info() {
-        return jobInfo;
-    }
-
-    /** {@inheritDoc} */
-    @Override public Collection<HadoopInputSplit> input() throws IgniteCheckedException {
-        ClassLoader oldLdr = HadoopUtils.setContextClassLoader(jobConf.getClassLoader());
-
-        try {
-            String jobDirPath = jobConf.get(MRJobConfig.MAPREDUCE_JOB_DIR);
-
-            if (jobDirPath == null) { // The job was probably not submitted by a Hadoop client.
-                // Assume the needed classes are available and try to generate input splits ourselves.
-                if (jobConf.getUseNewMapper())
-                    return HadoopV2Splitter.splitJob(jobCtx);
-                else
-                    return HadoopV1Splitter.splitJob(jobConf);
-            }
-
-            Path jobDir = new Path(jobDirPath);
-
-            try {
-                FileSystem fs = fileSystem(jobDir.toUri(), jobConf);
-
-                JobSplit.TaskSplitMetaInfo[] metaInfos = SplitMetaInfoReader.readSplitMetaInfo(hadoopJobID, fs, jobConf,
-                    jobDir);
-
-                if (F.isEmpty(metaInfos))
-                    throw new IgniteCheckedException("No input splits found.");
-
-                Path splitsFile = JobSubmissionFiles.getJobSplitFile(jobDir);
-
-                try (FSDataInputStream in = fs.open(splitsFile)) {
-                    Collection<HadoopInputSplit> res = new ArrayList<>(metaInfos.length);
-
-                    for (JobSplit.TaskSplitMetaInfo metaInfo : metaInfos) {
-                        long off = metaInfo.getStartOffset();
-
-                        String[] hosts = metaInfo.getLocations();
-
-                        in.seek(off);
-
-                        String clsName = Text.readString(in);
-
-                        HadoopFileBlock block = HadoopV1Splitter.readFileBlock(clsName, in, hosts);
-
-                        if (block == null)
-                            block = HadoopV2Splitter.readFileBlock(clsName, in, hosts);
-
-                        res.add(block != null ? block : new HadoopExternalSplit(hosts, off));
-                    }
-
-                    return res;
-                }
-            }
-            catch (Throwable e) {
-                if (e instanceof Error)
-                    throw (Error)e;
-                else
-                    throw transformException(e);
-            }
-        }
-        finally {
-            HadoopUtils.restoreContextClassLoader(oldLdr);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings({"unchecked", "MismatchedQueryAndUpdateOfCollection" })
-    @Override public HadoopTaskContext getTaskContext(HadoopTaskInfo info) throws IgniteCheckedException {
-        T2<HadoopTaskType, Integer> locTaskId = new T2<>(info.type(),  info.taskNumber());
-
-        GridFutureAdapter<HadoopTaskContext> fut = ctxs.get(locTaskId);
-
-        if (fut != null)
-            return fut.get();
-
-        GridFutureAdapter<HadoopTaskContext> old = ctxs.putIfAbsent(locTaskId, fut = new GridFutureAdapter<>());
-
-        if (old != null)
-            return old.get();
-
-        Class<? extends HadoopTaskContext> cls = taskCtxClsPool.poll();
-
-        try {
-            if (cls == null) {
-                // If there is no pooled class, then load a new one.
-                // Note that the class loader is named after the task it was initially created for,
-                // but later it may be reused for other tasks.
-                HadoopClassLoader ldr = new HadoopClassLoader(rsrcMgr.classPath(),
-                    HadoopClassLoader.nameForTask(info, false), libNames);
-
-                cls = (Class<? extends HadoopTaskContext>)ldr.loadClass(HadoopV2TaskContext.class.getName());
-
-                fullCtxClsQueue.add(cls);
-            }
-
-            Constructor<?> ctr = cls.getConstructor(HadoopTaskInfo.class, HadoopJob.class,
-                HadoopJobId.class, UUID.class, DataInput.class);
-
-            if (jobConfData == null)
-                synchronized(jobConf) {
-                    if (jobConfData == null) {
-                        ByteArrayOutputStream buf = new ByteArrayOutputStream();
-
-                        jobConf.write(new DataOutputStream(buf));
-
-                        jobConfData = buf.toByteArray();
-                    }
-                }
-
-            HadoopTaskContext res = (HadoopTaskContext)ctr.newInstance(info, this, jobId, locNodeId,
-                new DataInputStream(new ByteArrayInputStream(jobConfData)));
-
-            fut.onDone(res);
-
-            return res;
-        }
-        catch (Throwable e) {
-            IgniteCheckedException te = transformException(e);
-
-            fut.onDone(te);
-
-            if (e instanceof Error)
-                throw (Error)e;
-
-            throw te;
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void initialize(boolean external, UUID locNodeId) throws IgniteCheckedException {
-        assert locNodeId != null;
-
-        this.locNodeId = locNodeId;
-
-        ClassLoader oldLdr = HadoopUtils.setContextClassLoader(getClass().getClassLoader());
-
-        try {
-            rsrcMgr.prepareJobEnvironment(!external, jobLocalDir(locNodeId, jobId));
-        }
-        finally {
-            HadoopUtils.restoreContextClassLoader(oldLdr);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("ThrowFromFinallyBlock")
-    @Override public void dispose(boolean external) throws IgniteCheckedException {
-        try {
-            if (rsrcMgr != null && !external) {
-                File jobLocDir = jobLocalDir(locNodeId, jobId);
-
-                if (jobLocDir.exists())
-                    U.delete(jobLocDir);
-            }
-        }
-        finally {
-            taskCtxClsPool.clear();
-
-            Throwable err = null;
-
-            // Stop the daemon threads that have been created
-            // with the task class loaders:
-            while (true) {
-                Class<? extends HadoopTaskContext> cls = fullCtxClsQueue.poll();
-
-                if (cls == null)
-                    break;
-
-                try {
-                    final ClassLoader ldr = cls.getClassLoader();
-
-                    try {
-                        // Stop Hadoop daemons for this *task*:
-                        stopHadoopFsDaemons(ldr);
-                    }
-                    catch (Exception e) {
-                        if (err == null)
-                            err = e;
-                    }
-
-                    // Also close all the FileSystems cached in
-                    // HadoopLazyConcurrentMap for this *task* class loader:
-                    closeCachedTaskFileSystems(ldr);
-                }
-                catch (Throwable e) {
-                    if (err == null)
-                        err = e;
-
-                    if (e instanceof Error)
-                        throw (Error)e;
-                }
-            }
-
-            assert fullCtxClsQueue.isEmpty();
-
-            try {
-                // Close all cached file systems for this *Job*:
-                fsMap.close();
-            }
-            catch (Exception e) {
-                if (err == null)
-                    err = e;
-            }
-
-            if (err != null)
-                throw U.cast(err);
-        }
-    }
-
-    /**
-     * Stops Hadoop Fs daemon threads.
-     * @param ldr The task ClassLoader to stop the daemons for.
-     * @throws Exception On error.
-     */
-    private void stopHadoopFsDaemons(ClassLoader ldr) throws Exception {
-        Class<?> daemonCls = ldr.loadClass(HadoopClassLoader.CLS_DAEMON);
-
-        Method m = daemonCls.getMethod("dequeueAndStopAll");
-
-        m.invoke(null);
-    }
-
-    /**
-     * Closes all the file systems used by the task.
-     * @param ldr The task class loader.
-     * @throws Exception On error.
-     */
-    private void closeCachedTaskFileSystems(ClassLoader ldr) throws Exception {
-        Class<?> clazz = ldr.loadClass(HadoopV2TaskContext.class.getName());
-
-        Method m = clazz.getMethod("close");
-
-        m.invoke(null);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void prepareTaskEnvironment(HadoopTaskInfo info) throws IgniteCheckedException {
-        rsrcMgr.prepareTaskWorkDir(taskLocalDir(locNodeId, info));
-    }
-
-    /** {@inheritDoc} */
-    @Override public void cleanupTaskEnvironment(HadoopTaskInfo info) throws IgniteCheckedException {
-        HadoopTaskContext ctx = ctxs.remove(new T2<>(info.type(), info.taskNumber())).get();
-
-        taskCtxClsPool.add(ctx.getClass());
-
-        File locDir = taskLocalDir(locNodeId, info);
-
-        if (locDir.exists())
-            U.delete(locDir);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void cleanupStagingDirectory() {
-        rsrcMgr.cleanupStagingDirectory();
-    }
-
-    /**
-     * Getter for job configuration.
-     * @return The job configuration.
-     */
-    public JobConf jobConf() {
-        return jobConf;
-    }
-
-    /**
-     * Gets the file system for this job.
-     * @param uri The URI.
-     * @param cfg The configuration.
-     * @return The file system.
-     * @throws IOException On error.
-     */
-    public FileSystem fileSystem(@Nullable URI uri, Configuration cfg) throws IOException {
-        return fileSystemForMrUserWithCaching(uri, cfg, fsMap);
-    }
-}
\ No newline at end of file

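For context, getTaskContext() in the file above is a compute-once-per-key guard: the first caller for a task installs a future under the (type, task number) key, builds the context, and completes the future; racing callers block on the winner's future instead of building a duplicate. A minimal, generic sketch of the same idiom using only JDK types (class and method names here are illustrative, not Ignite API):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.function.Function;

    public class OncePerKey<K, V> {
        private final ConcurrentMap<K, CompletableFuture<V>> futs = new ConcurrentHashMap<>();

        /** Computes the value for a key exactly once; concurrent callers wait for the winner. */
        public V get(K key, Function<? super K, ? extends V> factory) throws Exception {
            CompletableFuture<V> fut = futs.get(key);

            if (fut == null) {
                CompletableFuture<V> newFut = new CompletableFuture<>();

                fut = futs.putIfAbsent(key, newFut);

                if (fut == null) {
                    fut = newFut;

                    try {
                        newFut.complete(factory.apply(key));
                    }
                    catch (Throwable e) {
                        newFut.completeExceptionally(e); // All waiters see the same failure.
                    }
                }
            }

            return fut.get();
        }
    }

HadoopV2Job additionally recycles the winner's HadoopClassLoader through taskCtxClsPool so later tasks can reuse the class loading environment; the sketch omits that pooling.
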
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2JobResourceManager.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2JobResourceManager.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2JobResourceManager.java
deleted file mode 100644
index 33aef60..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2JobResourceManager.java
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v2;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.net.URL;
-import java.nio.file.FileSystemException;
-import java.nio.file.Files;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashSet;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.JobContextImpl;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.util.RunJar;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteLogger;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
-import org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemsUtils;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Provides all resources needed for job execution. Downloads the main jar and the configuration, and places
- * additional files on the local file system.
- */
-class HadoopV2JobResourceManager {
-    /** File type Fs disable caching property name. */
-    private static final String FILE_DISABLE_CACHING_PROPERTY_NAME =
-        HadoopFileSystemsUtils.disableFsCachePropertyName("file");
-
-    /** Hadoop job context. */
-    private final JobContextImpl ctx;
-
-    /** Logger. */
-    private final IgniteLogger log;
-
-    /** Job ID. */
-    private final HadoopJobId jobId;
-
-    /** Class path list. */
-    private URL[] clsPath;
-
-    /** Set of local resources. */
-    private final Collection<File> rsrcSet = new HashSet<>();
-
-    /** Staging directory used to deliver the job jar and config to the worker nodes. */
-    private Path stagingDir;
-
-    /** The job. */
-    private final HadoopV2Job job;
-
-    /**
-     * Creates new instance.
-     * @param jobId Job ID.
-     * @param ctx Hadoop job context.
-     * @param log Logger.
-     * @param job The job.
-     */
-    public HadoopV2JobResourceManager(HadoopJobId jobId, JobContextImpl ctx, IgniteLogger log, HadoopV2Job job) {
-        this.jobId = jobId;
-        this.ctx = ctx;
-        this.log = log.getLogger(HadoopV2JobResourceManager.class);
-        this.job = job;
-    }
-
-    /**
-     * Set working directory in local file system.
-     *
-     * @param dir Working directory.
-     * @throws IOException If failed.
-     */
-    private void setLocalFSWorkingDirectory(File dir) throws IOException {
-        JobConf cfg = ctx.getJobConf();
-
-        ClassLoader oldLdr = HadoopUtils.setContextClassLoader(cfg.getClassLoader());
-
-        try {
-            cfg.set(HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP, dir.getAbsolutePath());
-
-            if (!cfg.getBoolean(FILE_DISABLE_CACHING_PROPERTY_NAME, false))
-                FileSystem.getLocal(cfg).setWorkingDirectory(new Path(dir.getAbsolutePath()));
-        }
-        finally {
-            HadoopUtils.restoreContextClassLoader(oldLdr);
-        }
-    }
-
-    /**
-     * Prepares job resources: resolves the class path list and downloads resources if needed.
-     *
-     * @param download {@code true} if resources need to be downloaded.
-     * @param jobLocDir Work directory for the job.
-     * @throws IgniteCheckedException If failed.
-     */
-    public void prepareJobEnvironment(boolean download, File jobLocDir) throws IgniteCheckedException {
-        try {
-            if (jobLocDir.exists())
-                throw new IgniteCheckedException("Local job directory already exists: " + jobLocDir.getAbsolutePath());
-
-            JobConf cfg = ctx.getJobConf();
-
-            String mrDir = cfg.get("mapreduce.job.dir");
-
-            if (mrDir != null) {
-                stagingDir = new Path(new URI(mrDir));
-
-                if (download) {
-                    FileSystem fs = job.fileSystem(stagingDir.toUri(), cfg);
-
-                    if (!fs.exists(stagingDir))
-                        throw new IgniteCheckedException("Failed to find map-reduce submission " +
-                            "directory (does not exist): " + stagingDir);
-
-                    if (!FileUtil.copy(fs, stagingDir, jobLocDir, false, cfg))
-                        throw new IgniteCheckedException("Failed to copy job submission directory "
-                            + "contents to local file system "
-                            + "[path=" + stagingDir + ", locDir=" + jobLocDir.getAbsolutePath()
-                            + ", jobId=" + jobId + ']');
-                }
-
-                File jarJobFile = new File(jobLocDir, "job.jar");
-
-                Collection<URL> clsPathUrls = new ArrayList<>();
-
-                clsPathUrls.add(jarJobFile.toURI().toURL());
-
-                rsrcSet.add(jarJobFile);
-                rsrcSet.add(new File(jobLocDir, "job.xml"));
-
-                processFiles(jobLocDir, ctx.getCacheFiles(), download, false, null, MRJobConfig.CACHE_LOCALFILES);
-                processFiles(jobLocDir, ctx.getCacheArchives(), download, true, null, MRJobConfig.CACHE_LOCALARCHIVES);
-                processFiles(jobLocDir, ctx.getFileClassPaths(), download, false, clsPathUrls, null);
-                processFiles(jobLocDir, ctx.getArchiveClassPaths(), download, true, clsPathUrls, null);
-
-                if (!clsPathUrls.isEmpty()) {
-                    clsPath = new URL[clsPathUrls.size()];
-
-                    clsPathUrls.toArray(clsPath);
-                }
-            }
-            else if (!jobLocDir.mkdirs())
-                throw new IgniteCheckedException("Failed to create local job directory: "
-                    + jobLocDir.getAbsolutePath());
-
-            setLocalFSWorkingDirectory(jobLocDir);
-        }
-        catch (URISyntaxException | IOException e) {
-            throw new IgniteCheckedException(e);
-        }
-    }
-
-    /**
-     * Process list of resources.
-     *
-     * @param jobLocDir Job working directory.
-     * @param files Array of {@link java.net.URI} or {@link org.apache.hadoop.fs.Path} resources to process.
-     * @param download {@code true} if resources need to be downloaded; otherwise only the class path is processed.
-     * @param extract {@code true} if archives need to be extracted.
-     * @param clsPathUrls Collection to which class path resource URLs are added.
-     * @param rsrcNameProp Property name under which the resource name array is set.
-     * @throws IOException If failed.
-     */
-    private void processFiles(File jobLocDir, @Nullable Object[] files, boolean download, boolean extract,
-        @Nullable Collection<URL> clsPathUrls, @Nullable String rsrcNameProp) throws IOException {
-        if (F.isEmptyOrNulls(files))
-            return;
-
-        Collection<String> res = new ArrayList<>();
-
-        for (Object pathObj : files) {
-            Path srcPath;
-
-            if (pathObj instanceof URI) {
-                URI uri = (URI)pathObj;
-
-                srcPath = new Path(uri);
-            }
-            else
-                srcPath = (Path)pathObj;
-
-            String locName = srcPath.getName();
-
-            File dstPath = new File(jobLocDir.getAbsolutePath(), locName);
-
-            res.add(locName);
-
-            rsrcSet.add(dstPath);
-
-            if (clsPathUrls != null)
-                clsPathUrls.add(dstPath.toURI().toURL());
-
-            if (!download)
-                continue;
-
-            JobConf cfg = ctx.getJobConf();
-
-            FileSystem dstFs = FileSystem.getLocal(cfg);
-
-            FileSystem srcFs = job.fileSystem(srcPath.toUri(), cfg);
-
-            if (extract) {
-                File archivesPath = new File(jobLocDir.getAbsolutePath(), ".cached-archives");
-
-                if (!archivesPath.exists() && !archivesPath.mkdir())
-                    throw new IOException("Failed to create directory " +
-                        "[path=" + archivesPath + ", jobId=" + jobId + ']');
-
-                File archiveFile = new File(archivesPath, locName);
-
-                FileUtil.copy(srcFs, srcPath, dstFs, new Path(archiveFile.toString()), false, cfg);
-
-                String archiveNameLC = archiveFile.getName().toLowerCase();
-
-                if (archiveNameLC.endsWith(".jar"))
-                    RunJar.unJar(archiveFile, dstPath);
-                else if (archiveNameLC.endsWith(".zip"))
-                    FileUtil.unZip(archiveFile, dstPath);
-                else if (archiveNameLC.endsWith(".tar.gz") ||
-                    archiveNameLC.endsWith(".tgz") ||
-                    archiveNameLC.endsWith(".tar"))
-                    FileUtil.unTar(archiveFile, dstPath);
-                else
-                    throw new IOException("Cannot unpack archive [path=" + srcPath + ", jobId=" + jobId + ']');
-            }
-            else
-                FileUtil.copy(srcFs, srcPath, dstFs, new Path(dstPath.toString()), false, cfg);
-        }
-
-        if (!res.isEmpty() && rsrcNameProp != null)
-            ctx.getJobConf().setStrings(rsrcNameProp, res.toArray(new String[res.size()]));
-    }
-
-    /**
-     * Prepares working directory for the task.
-     *
-     * <ul>
-     *     <li>Creates working directory.</li>
-     *     <li>Creates symbolic links to all job resources in working directory.</li>
-     * </ul>
-     *
-     * @param path Path to working directory of the task.
-     * @throws IgniteCheckedException If failed.
-     */
-    public void prepareTaskWorkDir(File path) throws IgniteCheckedException {
-        try {
-            if (path.exists())
-                throw new IOException("Task local directory already exists: " + path);
-
-            if (!path.mkdir())
-                throw new IOException("Failed to create directory: " + path);
-
-            for (File resource : rsrcSet) {
-                File symLink = new File(path, resource.getName());
-
-                try {
-                    Files.createSymbolicLink(symLink.toPath(), resource.toPath());
-                }
-                catch (IOException e) {
-                    String msg = "Unable to create symlink \"" + symLink + "\" to \"" + resource + "\".";
-
-                    if (U.isWindows() && e instanceof FileSystemException)
-                        msg += "\n\nAbility to create symbolic links is required!\n" +
-                                "On Windows platform you have to grant permission 'Create symbolic links'\n" +
-                                "to your user or run the Accelerator as Administrator.\n";
-
-                    throw new IOException(msg, e);
-                }
-            }
-        }
-        catch (IOException e) {
-            throw new IgniteCheckedException("Unable to prepare local working directory for the task " +
-                 "[jobId=" + jobId + ", path=" + path+ ']', e);
-        }
-    }
-
-    /**
-     * Cleans up job staging directory.
-     */
-    public void cleanupStagingDirectory() {
-        try {
-            if (stagingDir != null) {
-                FileSystem fs = job.fileSystem(stagingDir.toUri(), ctx.getJobConf());
-
-                fs.delete(stagingDir, true);
-            }
-        }
-        catch (Exception e) {
-            log.error("Failed to remove job staging directory [path=" + stagingDir + ", jobId=" + jobId + ']', e);
-        }
-    }
-
-    /**
-     * Returns the class path array for the current job.
-     *
-     * @return Class path array.
-     */
-    @Nullable public URL[] classPath() {
-        return clsPath;
-    }
-}
\ No newline at end of file

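prepareTaskWorkDir() above gives every task a private directory while sharing the downloaded job resources, by creating one symbolic link per resource. A condensed sketch of that step, assuming the resources were already copied locally (names are illustrative):

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.util.Collection;

    public final class TaskDirSketch {
        /** Links each shared job resource into a fresh per-task directory. */
        static void linkResources(File taskDir, Collection<File> resources) throws IOException {
            if (!taskDir.mkdir())
                throw new IOException("Failed to create directory: " + taskDir);

            for (File rsrc : resources) {
                File link = new File(taskDir, rsrc.getName());

                // On Windows this requires the 'Create symbolic links' privilege.
                Files.createSymbolicLink(link.toPath(), rsrc.toPath());
            }
        }
    }
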
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2MapTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2MapTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2MapTask.java
deleted file mode 100644
index fafa79b..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2MapTask.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v2;
-
-import org.apache.hadoop.mapred.JobContextImpl;
-import org.apache.hadoop.mapreduce.InputFormat;
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.OutputFormat;
-import org.apache.hadoop.mapreduce.RecordReader;
-import org.apache.hadoop.mapreduce.lib.map.WrappedMapper;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.IgniteInterruptedCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-
-/**
- * Hadoop map task implementation for v2 API.
- */
-public class HadoopV2MapTask extends HadoopV2Task {
-    /**
-     * @param taskInfo Task info.
-     */
-    public HadoopV2MapTask(HadoopTaskInfo taskInfo) {
-        super(taskInfo);
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings({"ConstantConditions", "unchecked"})
-    @Override public void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException {
-        OutputFormat outputFormat = null;
-        Exception err = null;
-
-        JobContextImpl jobCtx = taskCtx.jobContext();
-
-        try {
-            InputSplit nativeSplit = hadoopContext().getInputSplit();
-
-            if (nativeSplit == null)
-                throw new IgniteCheckedException("Input split cannot be null.");
-
-            InputFormat inFormat = ReflectionUtils.newInstance(jobCtx.getInputFormatClass(),
-                hadoopContext().getConfiguration());
-
-            RecordReader reader = inFormat.createRecordReader(nativeSplit, hadoopContext());
-
-            reader.initialize(nativeSplit, hadoopContext());
-
-            hadoopContext().reader(reader);
-
-            HadoopJobInfo jobInfo = taskCtx.job().info();
-
-            outputFormat = jobInfo.hasCombiner() || jobInfo.hasReducer() ? null : prepareWriter(jobCtx);
-
-            Mapper mapper = ReflectionUtils.newInstance(jobCtx.getMapperClass(), hadoopContext().getConfiguration());
-
-            try {
-                mapper.run(new WrappedMapper().getMapContext(hadoopContext()));
-            }
-            finally {
-                closeWriter();
-            }
-
-            commit(outputFormat);
-        }
-        catch (InterruptedException e) {
-            err = e;
-
-            Thread.currentThread().interrupt();
-
-            throw new IgniteInterruptedCheckedException(e);
-        }
-        catch (Exception e) {
-            err = e;
-
-            throw new IgniteCheckedException(e);
-        }
-        finally {
-            if (err != null)
-                abort(outputFormat);
-        }
-    }
-}
\ No newline at end of file

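run0() above only hosts user code: it reflectively instantiates the configured InputFormat and Mapper and pumps the mapper through WrappedMapper. A typical v2 mapper of the kind it would execute (an illustrative word-count mapper, not part of this module):

    import java.io.IOException;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;

    public class TokenMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
        private static final IntWritable ONE = new IntWritable(1);

        /** Emits (token, 1) for every whitespace-separated token in the line. */
        @Override protected void map(LongWritable key, Text val, Context ctx)
            throws IOException, InterruptedException {
            for (String tok : val.toString().split("\\s+")) {
                if (!tok.isEmpty())
                    ctx.write(new Text(tok), ONE);
            }
        }
    }

Note how run0() skips the direct output writer when a combiner or reducer is configured: map output then goes to the shuffle instead of the OutputFormat.
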
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Partitioner.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Partitioner.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Partitioner.java
deleted file mode 100644
index e199ede..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Partitioner.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v2;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.Partitioner;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.ignite.internal.processors.hadoop.HadoopPartitioner;
-
-/**
- * Hadoop partitioner adapter for v2 API.
- */
-public class HadoopV2Partitioner implements HadoopPartitioner {
-    /** Partitioner instance. */
-    private Partitioner<Object, Object> part;
-
-    /**
-     * @param cls Hadoop partitioner class.
-     * @param conf Job configuration.
-     */
-    public HadoopV2Partitioner(Class<? extends Partitioner<?, ?>> cls, Configuration conf) {
-        part = (Partitioner<Object, Object>) ReflectionUtils.newInstance(cls, conf);
-    }
-
-    /** {@inheritDoc} */
-    @Override public int partition(Object key, Object val, int parts) {
-        return part.getPartition(key, val, parts);
-    }
-}
\ No newline at end of file

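The adapter above is the whole v2 partitioner bridge: Ignite calls partition(), which delegates to Hadoop's Partitioner.getPartition(). A typical implementation it might wrap (illustrative; equivalent in spirit to Hadoop's default hash partitioner):

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Partitioner;

    public class HashingPartitioner extends Partitioner<Text, IntWritable> {
        /** Masks the sign bit so the result always falls in [0, parts). */
        @Override public int getPartition(Text key, IntWritable val, int parts) {
            return (key.hashCode() & Integer.MAX_VALUE) % parts;
        }
    }
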
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2ReduceTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2ReduceTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2ReduceTask.java
deleted file mode 100644
index e5c2ed2..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2ReduceTask.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v2;
-
-import org.apache.hadoop.mapred.JobContextImpl;
-import org.apache.hadoop.mapreduce.OutputFormat;
-import org.apache.hadoop.mapreduce.Reducer;
-import org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.IgniteInterruptedCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-
-/**
- * Hadoop reduce task implementation for v2 API.
- */
-public class HadoopV2ReduceTask extends HadoopV2Task {
-    /** {@code True} if reduce, {@code false} if combine. */
-    private final boolean reduce;
-
-    /**
-     * Constructor.
-     *
-     * @param taskInfo Task info.
-     * @param reduce {@code True} if reduce, {@code false} if combine.
-     */
-    public HadoopV2ReduceTask(HadoopTaskInfo taskInfo, boolean reduce) {
-        super(taskInfo);
-
-        this.reduce = reduce;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings({"ConstantConditions", "unchecked"})
-    @Override public void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException {
-        OutputFormat outputFormat = null;
-        Exception err = null;
-
-        JobContextImpl jobCtx = taskCtx.jobContext();
-
-        try {
-            outputFormat = reduce || !taskCtx.job().info().hasReducer() ? prepareWriter(jobCtx) : null;
-
-            Reducer reducer = reduce
-                ? ReflectionUtils.newInstance(jobCtx.getReducerClass(), jobCtx.getConfiguration())
-                : ReflectionUtils.newInstance(jobCtx.getCombinerClass(), jobCtx.getConfiguration());
-
-            try {
-                reducer.run(new WrappedReducer().getReducerContext(hadoopContext()));
-            }
-            finally {
-                closeWriter();
-            }
-
-            commit(outputFormat);
-        }
-        catch (InterruptedException e) {
-            err = e;
-
-            Thread.currentThread().interrupt();
-
-            throw new IgniteInterruptedCheckedException(e);
-        }
-        catch (Exception e) {
-            err = e;
-
-            throw new IgniteCheckedException(e);
-        }
-        finally {
-            if (err != null)
-                abort(outputFormat);
-        }
-    }
-}
\ No newline at end of file

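The reduce flag above selects between the job's reducer class and its combiner class, since both phases share this task body. An associative, commutative reducer such as the following illustrative sum reducer can serve in either role:

    import java.io.IOException;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Reducer;

    public class SumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        /** Sums all counts for a key; safe to run as a combiner as well. */
        @Override protected void reduce(Text key, Iterable<IntWritable> vals, Context ctx)
            throws IOException, InterruptedException {
            int sum = 0;

            for (IntWritable v : vals)
                sum += v.get();

            ctx.write(key, new IntWritable(sum));
        }
    }
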
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2SetupTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2SetupTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2SetupTask.java
deleted file mode 100644
index 49b5ee7..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2SetupTask.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v2;
-
-import java.io.IOException;
-import org.apache.hadoop.mapred.JobContextImpl;
-import org.apache.hadoop.mapreduce.OutputCommitter;
-import org.apache.hadoop.mapreduce.OutputFormat;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.IgniteInterruptedCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-
-/**
- * Hadoop setup task (prepares job).
- */
-public class HadoopV2SetupTask extends HadoopV2Task {
-    /**
-     * Constructor.
-     *
-     * @param taskInfo Task info.
-     */
-    public HadoopV2SetupTask(HadoopTaskInfo taskInfo) {
-        super(taskInfo);
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("ConstantConditions")
-    @Override protected void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException {
-        try {
-            JobContextImpl jobCtx = taskCtx.jobContext();
-
-            OutputFormat outputFormat = getOutputFormat(jobCtx);
-
-            outputFormat.checkOutputSpecs(jobCtx);
-
-            OutputCommitter committer = outputFormat.getOutputCommitter(hadoopContext());
-
-            if (committer != null)
-                committer.setupJob(jobCtx);
-        }
-        catch (ClassNotFoundException | IOException e) {
-            throw new IgniteCheckedException(e);
-        }
-        catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-
-            throw new IgniteInterruptedCheckedException(e);
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Splitter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Splitter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Splitter.java
deleted file mode 100644
index f4ed668..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Splitter.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v2;
-
-import java.io.DataInput;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import org.apache.hadoop.mapreduce.InputFormat;
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.mapreduce.lib.input.FileSplit;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.IgniteInterruptedCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopFileBlock;
-import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
-import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Hadoop API v2 splitter.
- */
-public class HadoopV2Splitter {
-    /** */
-    private static final String[] EMPTY_HOSTS = {};
-
-    /**
-     * @param ctx Job context.
-     * @return Collection of mapped splits.
-     * @throws IgniteCheckedException If mapping failed.
-     */
-    public static Collection<HadoopInputSplit> splitJob(JobContext ctx) throws IgniteCheckedException {
-        try {
-            InputFormat<?, ?> format = ReflectionUtils.newInstance(ctx.getInputFormatClass(), ctx.getConfiguration());
-
-            assert format != null;
-
-            List<InputSplit> splits = format.getSplits(ctx);
-
-            Collection<HadoopInputSplit> res = new ArrayList<>(splits.size());
-
-            int id = 0;
-
-            for (InputSplit nativeSplit : splits) {
-                if (nativeSplit instanceof FileSplit) {
-                    FileSplit s = (FileSplit)nativeSplit;
-
-                    res.add(new HadoopFileBlock(s.getLocations(), s.getPath().toUri(), s.getStart(), s.getLength()));
-                }
-                else
-                    res.add(HadoopUtils.wrapSplit(id, nativeSplit, nativeSplit.getLocations()));
-
-                id++;
-            }
-
-            return res;
-        }
-        catch (IOException | ClassNotFoundException e) {
-            throw new IgniteCheckedException(e);
-        }
-        catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-
-            throw new IgniteInterruptedCheckedException(e);
-        }
-    }
-
-    /**
-     * @param clsName Input split class name.
-     * @param in Input stream.
-     * @param hosts Optional hosts.
-     * @return File block or {@code null} if it is not a {@link FileSplit} instance.
-     * @throws IgniteCheckedException If failed.
-     */
-    public static HadoopFileBlock readFileBlock(String clsName, DataInput in, @Nullable String[] hosts)
-        throws IgniteCheckedException {
-        if (!FileSplit.class.getName().equals(clsName))
-            return null;
-
-        FileSplit split = new FileSplit();
-
-        try {
-            split.readFields(in);
-        }
-        catch (IOException e) {
-            throw new IgniteCheckedException(e);
-        }
-
-        if (hosts == null)
-            hosts = EMPTY_HOSTS;
-
-        return new HadoopFileBlock(hosts, split.getPath().toUri(), split.getStart(), split.getLength());
-    }
-}
\ No newline at end of file

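readFileBlock() above consumes the raw Writable encoding of a FileSplit, i.e. exactly what FileSplit.write() produces into the job's split file. A self-contained round-trip sketch of that wire format (the path and sizes are made up):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.lib.input.FileSplit;

    public class FileSplitRoundTrip {
        public static void main(String[] args) throws Exception {
            FileSplit split = new FileSplit(new Path("hdfs:///data/part-0"), 0L, 1024L, new String[0]);

            ByteArrayOutputStream buf = new ByteArrayOutputStream();

            // Serialize the split the same way the split file stores it.
            split.write(new DataOutputStream(buf));

            FileSplit read = new FileSplit();

            read.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));

            assert read.getStart() == 0L && read.getLength() == 1024L;
        }
    }
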
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Task.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Task.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Task.java
deleted file mode 100644
index 1383a61..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Task.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v2;
-
-import java.io.IOException;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.mapreduce.OutputCommitter;
-import org.apache.hadoop.mapreduce.OutputFormat;
-import org.apache.hadoop.mapreduce.RecordWriter;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopTask;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Extended Hadoop v2 task.
- */
-public abstract class HadoopV2Task extends HadoopTask {
-    /** Hadoop context. */
-    private HadoopV2Context hadoopCtx;
-
-    /**
-     * Constructor.
-     *
-     * @param taskInfo Task info.
-     */
-    protected HadoopV2Task(HadoopTaskInfo taskInfo) {
-        super(taskInfo);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
-        HadoopV2TaskContext ctx = (HadoopV2TaskContext)taskCtx;
-
-        hadoopCtx = new HadoopV2Context(ctx);
-
-        run0(ctx);
-    }
-
-    /**
-     * Internal task routine.
-     *
-     * @param taskCtx Task context.
-     * @throws IgniteCheckedException If failed.
-     */
-    protected abstract void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException;
-
-    /**
-     * @return hadoop context.
-     */
-    protected HadoopV2Context hadoopContext() {
-        return hadoopCtx;
-    }
-
-    /**
-     * Create and configure an OutputFormat instance.
-     *
-     * @param jobCtx Job context.
-     * @return Instance of the OutputFormat specified in the job configuration.
-     * @throws ClassNotFoundException If specified class not found.
-     */
-    protected OutputFormat getOutputFormat(JobContext jobCtx) throws ClassNotFoundException {
-        return ReflectionUtils.newInstance(jobCtx.getOutputFormatClass(), hadoopContext().getConfiguration());
-    }
-
-    /**
-     * Puts a writer into the Hadoop context and returns the associated output format instance.
-     *
-     * @param jobCtx Job context.
-     * @return Output format.
-     * @throws IgniteCheckedException In case of Grid exception.
-     * @throws InterruptedException In case of interrupt.
-     */
-    protected OutputFormat prepareWriter(JobContext jobCtx)
-        throws IgniteCheckedException, InterruptedException {
-        try {
-            OutputFormat outputFormat = getOutputFormat(jobCtx);
-
-            assert outputFormat != null;
-
-            OutputCommitter outCommitter = outputFormat.getOutputCommitter(hadoopCtx);
-
-            if (outCommitter != null)
-                outCommitter.setupTask(hadoopCtx);
-
-            RecordWriter writer = outputFormat.getRecordWriter(hadoopCtx);
-
-            hadoopCtx.writer(writer);
-
-            return outputFormat;
-        }
-        catch (IOException | ClassNotFoundException e) {
-            throw new IgniteCheckedException(e);
-        }
-    }
-
-    /**
-     * Closes writer.
-     *
-     * @throws Exception If the writer fails to close.
-     */
-    protected void closeWriter() throws Exception {
-        RecordWriter writer = hadoopCtx.writer();
-
-        if (writer != null)
-            writer.close(hadoopCtx);
-    }
-
-    /**
-     * Setup task.
-     *
-     * @param outputFormat Output format.
-     * @throws IOException In case of IO exception.
-     * @throws InterruptedException In case of interrupt.
-     */
-    protected void setup(@Nullable OutputFormat outputFormat) throws IOException, InterruptedException {
-        if (hadoopCtx.writer() != null) {
-            assert outputFormat != null;
-
-            outputFormat.getOutputCommitter(hadoopCtx).setupTask(hadoopCtx);
-        }
-    }
-
-    /**
-     * Commit task.
-     *
-     * @param outputFormat Output format.
-     * @throws IgniteCheckedException In case of Grid exception.
-     * @throws IOException In case of IO exception.
-     * @throws InterruptedException In case of interrupt.
-     */
-    protected void commit(@Nullable OutputFormat outputFormat) throws IgniteCheckedException, IOException, InterruptedException {
-        if (hadoopCtx.writer() != null) {
-            assert outputFormat != null;
-
-            OutputCommitter outputCommitter = outputFormat.getOutputCommitter(hadoopCtx);
-
-            if (outputCommitter.needsTaskCommit(hadoopCtx))
-                outputCommitter.commitTask(hadoopCtx);
-        }
-    }
-
-    /**
-     * Abort task.
-     *
-     * @param outputFormat Output format.
-     */
-    protected void abort(@Nullable OutputFormat outputFormat) {
-        if (hadoopCtx.writer() != null) {
-            assert outputFormat != null;
-
-            try {
-                outputFormat.getOutputCommitter(hadoopCtx).abortTask(hadoopCtx);
-            }
-            catch (IOException ignore) {
-                // Ignore.
-            }
-            catch (InterruptedException ignore) {
-                Thread.currentThread().interrupt();
-            }
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void cancel() {
-        hadoopCtx.cancel();
-    }
-}
\ No newline at end of file

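prepareWriter(), commit() and abort() above spread Hadoop's standard output-commit protocol across the task lifecycle. Condensed into a single flow (an illustrative sketch; record writing elided):

    import org.apache.hadoop.mapreduce.OutputCommitter;
    import org.apache.hadoop.mapreduce.OutputFormat;
    import org.apache.hadoop.mapreduce.RecordWriter;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;

    public final class CommitProtocolSketch {
        static <K, V> void writeTask(OutputFormat<K, V> format, TaskAttemptContext ctx) throws Exception {
            OutputCommitter committer = format.getOutputCommitter(ctx);

            committer.setupTask(ctx);

            RecordWriter<K, V> writer = format.getRecordWriter(ctx);

            try {
                // ... write records via writer.write(key, val) ...

                writer.close(ctx);

                if (committer.needsTaskCommit(ctx))
                    committer.commitTask(ctx); // Publish this attempt's output.
            }
            catch (Exception e) {
                committer.abortTask(ctx); // Discard partial output; may itself throw and mask 'e'.

                throw e;
            }
        }
    }
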
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2TaskContext.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2TaskContext.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2TaskContext.java
deleted file mode 100644
index 4b1121c..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2TaskContext.java
+++ /dev/null
@@ -1,560 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v2;
-
-import java.io.DataInput;
-import java.io.File;
-import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
-import java.util.Comparator;
-import java.util.UUID;
-import java.util.concurrent.Callable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.serializer.Deserializer;
-import org.apache.hadoop.io.serializer.Serialization;
-import org.apache.hadoop.io.serializer.SerializationFactory;
-import org.apache.hadoop.io.serializer.WritableSerialization;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.JobContextImpl;
-import org.apache.hadoop.mapred.JobID;
-import org.apache.hadoop.mapred.TaskAttemptID;
-import org.apache.hadoop.mapred.TaskID;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.mapreduce.JobSubmissionFiles;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopClassLoader;
-import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
-import org.apache.ignite.internal.processors.hadoop.HadoopJob;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-import org.apache.ignite.internal.processors.hadoop.HadoopPartitioner;
-import org.apache.ignite.internal.processors.hadoop.HadoopSerialization;
-import org.apache.ignite.internal.processors.hadoop.HadoopTask;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopTaskType;
-import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounter;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopCountersImpl;
-import org.apache.ignite.internal.processors.hadoop.fs.HadoopLazyConcurrentMap;
-import org.apache.ignite.internal.processors.hadoop.v1.HadoopV1CleanupTask;
-import org.apache.ignite.internal.processors.hadoop.v1.HadoopV1MapTask;
-import org.apache.ignite.internal.processors.hadoop.v1.HadoopV1Partitioner;
-import org.apache.ignite.internal.processors.hadoop.v1.HadoopV1ReduceTask;
-import org.apache.ignite.internal.processors.hadoop.v1.HadoopV1SetupTask;
-import org.apache.ignite.internal.processors.igfs.IgfsUtils;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.internal.A;
-import org.jetbrains.annotations.Nullable;
-
-import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.jobLocalDir;
-import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.taskLocalDir;
-import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.transformException;
-import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.unwrapSplit;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemCacheUtils.FsCacheKey;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemCacheUtils.createHadoopLazyConcurrentMap;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemCacheUtils.fileSystemForMrUserWithCaching;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_PREFER_LOCAL_WRITES;
-
-/**
- * Context for task execution.
- */
-public class HadoopV2TaskContext extends HadoopTaskContext {
-    /** */
-    private static final boolean COMBINE_KEY_GROUPING_SUPPORTED;
-
-    /** Lazy per-user file system cache used by the Hadoop task. */
-    private static final HadoopLazyConcurrentMap<FsCacheKey, FileSystem> fsMap
-        = createHadoopLazyConcurrentMap();
-
-    /**
-     * This method is invoked via reflection upon job finish, using the class loader of each task.
-     * It cleans up all the file systems created for the specific task.
-     * Each class loader uses its own instance of <code>fsMap</code> since the class loaders
-     * are different.
-     *
-     * @throws IgniteCheckedException On error.
-     */
-    public static void close() throws IgniteCheckedException {
-        fsMap.close();
-    }
-
-    /**
-     * Check for combiner grouping support (available since Hadoop 2.3).
-     */
-    static {
-        boolean ok;
-
-        try {
-            JobContext.class.getDeclaredMethod("getCombinerKeyGroupingComparator");
-
-            ok = true;
-        }
-        catch (NoSuchMethodException ignore) {
-            ok = false;
-        }
-
-        COMBINE_KEY_GROUPING_SUPPORTED = ok;
-    }
-
-    /** Flag is set if new context-object code is used for running the mapper. */
-    private final boolean useNewMapper;
-
-    /** Flag is set if new context-object code is used for running the reducer. */
-    private final boolean useNewReducer;
-
-    /** Flag is set if new context-object code is used for running the combiner. */
-    private final boolean useNewCombiner;
-
-    /** */
-    private final JobContextImpl jobCtx;
-
-    /** Set if the task is being cancelled. */
-    private volatile boolean cancelled;
-
-    /** Current task. */
-    private volatile HadoopTask task;
-
-    /** Local node ID. */
-    private final UUID locNodeId;
-
-    /** Counters for task. */
-    private final HadoopCounters cntrs = new HadoopCountersImpl();
-
-    /**
-     * @param taskInfo Task info.
-     * @param job Job.
-     * @param jobId Job ID.
-     * @param locNodeId Local node ID.
-     * @param jobConfDataInput DataInput to read the JobConf from.
-     */
-    public HadoopV2TaskContext(HadoopTaskInfo taskInfo, HadoopJob job, HadoopJobId jobId,
-        @Nullable UUID locNodeId, DataInput jobConfDataInput) throws IgniteCheckedException {
-        super(taskInfo, job);
-        this.locNodeId = locNodeId;
-
-        // Before creating the JobConf instance we should set the new context class loader.
-        ClassLoader oldLdr = HadoopUtils.setContextClassLoader(getClass().getClassLoader());
-
-        try {
-            JobConf jobConf = new JobConf();
-
-            try {
-                jobConf.readFields(jobConfDataInput);
-            }
-            catch (IOException e) {
-                throw new IgniteCheckedException(e);
-            }
-
-            // For map-reduce jobs prefer local writes.
-            jobConf.setBooleanIfUnset(PARAM_IGFS_PREFER_LOCAL_WRITES, true);
-
-            jobCtx = new JobContextImpl(jobConf, new JobID(jobId.globalId().toString(), jobId.localId()));
-
-            useNewMapper = jobConf.getUseNewMapper();
-            useNewReducer = jobConf.getUseNewReducer();
-            useNewCombiner = jobConf.getCombinerClass() == null;
-        }
-        finally {
-            HadoopUtils.restoreContextClassLoader(oldLdr);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public <T extends HadoopCounter> T counter(String grp, String name, Class<T> cls) {
-        return cntrs.counter(grp, name, cls);
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopCounters counters() {
-        return cntrs;
-    }
-
-    /**
-     * Creates appropriate task from current task info.
-     *
-     * @return Task.
-     */
-    private HadoopTask createTask() {
-        boolean isAbort = taskInfo().type() == HadoopTaskType.ABORT;
-
-        switch (taskInfo().type()) {
-            case SETUP:
-                return useNewMapper ? new HadoopV2SetupTask(taskInfo()) : new HadoopV1SetupTask(taskInfo());
-
-            case MAP:
-                return useNewMapper ? new HadoopV2MapTask(taskInfo()) : new HadoopV1MapTask(taskInfo());
-
-            case REDUCE:
-                return useNewReducer ? new HadoopV2ReduceTask(taskInfo(), true) :
-                    new HadoopV1ReduceTask(taskInfo(), true);
-
-            case COMBINE:
-                return useNewCombiner ? new HadoopV2ReduceTask(taskInfo(), false) :
-                    new HadoopV1ReduceTask(taskInfo(), false);
-
-            case COMMIT:
-            case ABORT:
-                return useNewReducer ? new HadoopV2CleanupTask(taskInfo(), isAbort) :
-                    new HadoopV1CleanupTask(taskInfo(), isAbort);
-
-            default:
-                return null;
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void run() throws IgniteCheckedException {
-        ClassLoader oldLdr = HadoopUtils.setContextClassLoader(jobConf().getClassLoader());
-
-        try {
-            try {
-                task = createTask();
-            }
-            catch (Throwable e) {
-                if (e instanceof Error)
-                    throw e;
-
-                throw transformException(e);
-            }
-
-            if (cancelled)
-                throw new HadoopTaskCancelledException("Task cancelled.");
-
-            try {
-                task.run(this);
-            }
-            catch (Throwable e) {
-                if (e instanceof Error)
-                    throw e;
-
-                throw transformException(e);
-            }
-        }
-        finally {
-            task = null;
-
-            HadoopUtils.restoreContextClassLoader(oldLdr);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void cancel() {
-        cancelled = true;
-
-        HadoopTask t = task;
-
-        if (t != null)
-            t.cancel();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void prepareTaskEnvironment() throws IgniteCheckedException {
-        File locDir;
-
-        switch(taskInfo().type()) {
-            case MAP:
-            case REDUCE:
-                job().prepareTaskEnvironment(taskInfo());
-
-                locDir = taskLocalDir(locNodeId, taskInfo());
-
-                break;
-
-            default:
-                locDir = jobLocalDir(locNodeId, taskInfo().jobId());
-        }
-
-        ClassLoader oldLdr = HadoopUtils.setContextClassLoader(jobConf().getClassLoader());
-
-        try {
-            FileSystem.get(jobConf());
-
-            LocalFileSystem locFs = FileSystem.getLocal(jobConf());
-
-            locFs.setWorkingDirectory(new Path(locDir.getAbsolutePath()));
-        }
-        catch (Throwable e) {
-            if (e instanceof Error)
-                throw (Error)e;
-
-            throw transformException(e);
-        }
-        finally {
-            HadoopUtils.restoreContextClassLoader(oldLdr);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void cleanupTaskEnvironment() throws IgniteCheckedException {
-        job().cleanupTaskEnvironment(taskInfo());
-    }
-
-    /**
-     * Creates Hadoop attempt ID.
-     *
-     * @return Attempt ID.
-     */
-    public TaskAttemptID attemptId() {
-        TaskID tid = new TaskID(jobCtx.getJobID(), taskType(taskInfo().type()), taskInfo().taskNumber());
-
-        return new TaskAttemptID(tid, taskInfo().attempt());
-    }
-
-    /**
-     * @param type Task type.
-     * @return Hadoop task type.
-     */
-    private TaskType taskType(HadoopTaskType type) {
-        switch (type) {
-            case SETUP:
-                return TaskType.JOB_SETUP;
-
-            case MAP:
-            case COMBINE:
-                return TaskType.MAP;
-
-            case REDUCE:
-                return TaskType.REDUCE;
-
-            case COMMIT:
-            case ABORT:
-                return TaskType.JOB_CLEANUP;
-
-            default:
-                return null;
-        }
-    }
-
-    /**
-     * Gets job configuration of the task.
-     *
-     * @return Job configuration.
-     */
-    public JobConf jobConf() {
-        return jobCtx.getJobConf();
-    }
-
-    /**
-     * Gets job context of the task.
-     *
-     * @return Job context.
-     */
-    public JobContextImpl jobContext() {
-        return jobCtx;
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopPartitioner partitioner() throws IgniteCheckedException {
-        Class<?> partClsOld = jobConf().getClass("mapred.partitioner.class", null);
-
-        if (partClsOld != null)
-            return new HadoopV1Partitioner(jobConf().getPartitionerClass(), jobConf());
-
-        try {
-            return new HadoopV2Partitioner(jobCtx.getPartitionerClass(), jobConf());
-        }
-        catch (ClassNotFoundException e) {
-            throw new IgniteCheckedException(e);
-        }
-    }
-
-    /**
-     * Gets serializer for specified class.
-     *
-     * @param cls Class.
-     * @param jobConf Job configuration.
-     * @return Appropriate serializer.
-     */
-    @SuppressWarnings("unchecked")
-    private HadoopSerialization getSerialization(Class<?> cls, Configuration jobConf) throws IgniteCheckedException {
-        A.notNull(cls, "cls");
-
-        SerializationFactory factory = new SerializationFactory(jobConf);
-
-        Serialization<?> serialization = factory.getSerialization(cls);
-
-        if (serialization == null)
-            throw new IgniteCheckedException("Failed to find serialization for: " + cls.getName());
-
-        if (serialization.getClass() == WritableSerialization.class)
-            return new HadoopWritableSerialization((Class<? extends Writable>)cls);
-
-        return new HadoopSerializationWrapper(serialization, cls);
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopSerialization keySerialization() throws IgniteCheckedException {
-        return getSerialization(jobCtx.getMapOutputKeyClass(), jobConf());
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopSerialization valueSerialization() throws IgniteCheckedException {
-        return getSerialization(jobCtx.getMapOutputValueClass(), jobConf());
-    }
-
-    /** {@inheritDoc} */
-    @Override public Comparator<Object> sortComparator() {
-        return (Comparator<Object>)jobCtx.getSortComparator();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Comparator<Object> groupComparator() {
-        Comparator<?> res;
-
-        switch (taskInfo().type()) {
-            case COMBINE:
-                res = COMBINE_KEY_GROUPING_SUPPORTED ?
-                    jobContext().getCombinerKeyGroupingComparator() : jobContext().getGroupingComparator();
-
-                break;
-
-            case REDUCE:
-                res = jobContext().getGroupingComparator();
-
-                break;
-
-            default:
-                return null;
-        }
-
-        if (res != null && res.getClass() != sortComparator().getClass())
-            return (Comparator<Object>)res;
-
-        return null;
-    }
-
-    /**
-     * @param split Split.
-     * @return Native Hadoop split.
-     * @throws IgniteCheckedException if failed.
-     */
-    @SuppressWarnings("unchecked")
-    public Object getNativeSplit(HadoopInputSplit split) throws IgniteCheckedException {
-        if (split instanceof HadoopExternalSplit)
-            return readExternalSplit((HadoopExternalSplit)split);
-
-        if (split instanceof HadoopSplitWrapper)
-            return unwrapSplit((HadoopSplitWrapper)split);
-
-        throw new IllegalStateException("Unknown split: " + split);
-    }
-
-    /**
-     * @param split External split.
-     * @return Native input split.
-     * @throws IgniteCheckedException If failed.
-     */
-    @SuppressWarnings("unchecked")
-    private Object readExternalSplit(HadoopExternalSplit split) throws IgniteCheckedException {
-        Path jobDir = new Path(jobConf().get(MRJobConfig.MAPREDUCE_JOB_DIR));
-
-        FileSystem fs;
-
-        try {
-            // This assertion uses .startsWith() instead of .equals() because task class loaders may
-            // be reused between tasks of the same job.
-            assert ((HadoopClassLoader)getClass().getClassLoader()).name()
-                .startsWith(HadoopClassLoader.nameForTask(taskInfo(), true));
-
-            // We also cache file systems there; all of them will be closed explicitly upon job end.
-            fs = fileSystemForMrUserWithCaching(jobDir.toUri(), jobConf(), fsMap);
-        }
-        catch (IOException e) {
-            throw new IgniteCheckedException(e);
-        }
-
-        try (FSDataInputStream in = fs.open(JobSubmissionFiles.getJobSplitFile(jobDir))) {
-
-            in.seek(split.offset());
-
-            String clsName = Text.readString(in);
-
-            Class<?> cls = jobConf().getClassByName(clsName);
-
-            assert cls != null;
-
-            Serialization serialization = new SerializationFactory(jobConf()).getSerialization(cls);
-
-            Deserializer deserializer = serialization.getDeserializer(cls);
-
-            deserializer.open(in);
-
-            Object res = deserializer.deserialize(null);
-
-            deserializer.close();
-
-            assert res != null;
-
-            return res;
-        }
-        catch (IOException | ClassNotFoundException e) {
-            throw new IgniteCheckedException(e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public <T> T runAsJobOwner(final Callable<T> c) throws IgniteCheckedException {
-        String user = job.info().user();
-
-        user = IgfsUtils.fixUserName(user);
-
-        assert user != null;
-
-        String ugiUser;
-
-        try {
-            UserGroupInformation currUser = UserGroupInformation.getCurrentUser();
-
-            assert currUser != null;
-
-            ugiUser = currUser.getShortUserName();
-        }
-        catch (IOException ioe) {
-            throw new IgniteCheckedException(ioe);
-        }
-
-        try {
-            if (F.eq(user, ugiUser))
-                // If the current UGI context user is the same, make a direct call:
-                return c.call();
-            else {
-                UserGroupInformation ugi = UserGroupInformation.getBestUGI(null, user);
-
-                return ugi.doAs(new PrivilegedExceptionAction<T>() {
-                    @Override public T run() throws Exception {
-                        return c.call();
-                    }
-                });
-            }
-        }
-        catch (Exception e) {
-            throw new IgniteCheckedException(e);
-        }
-    }
-}
\ No newline at end of file
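
A note on the runAsJobOwner() logic above: it avoids a needless doAs() round-trip when the
current UGI context user already matches the job owner. Below is a minimal standalone sketch of
that pattern, assuming only the stock Hadoop security API (the class and method names of the
sketch itself are illustrative):

    import java.security.PrivilegedExceptionAction;
    import java.util.concurrent.Callable;
    import org.apache.hadoop.security.UserGroupInformation;

    public final class UgiRunner {
        /** Runs the callable as the given user, short-circuiting when no impersonation is needed. */
        public static <T> T runAs(String user, final Callable<T> c) throws Exception {
            // Direct call if the current UGI context user already matches the target user.
            if (user.equals(UserGroupInformation.getCurrentUser().getShortUserName()))
                return c.call();

            // Otherwise resolve a UGI for the target user and execute under it.
            UserGroupInformation ugi = UserGroupInformation.getBestUGI(null, user);

            return ugi.doAs(new PrivilegedExceptionAction<T>() {
                @Override public T run() throws Exception {
                    return c.call();
                }
            });
        }
    }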

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopWritableSerialization.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopWritableSerialization.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopWritableSerialization.java
deleted file mode 100644
index f46f068..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopWritableSerialization.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.v2;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import org.apache.hadoop.io.Writable;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.processors.hadoop.HadoopSerialization;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Optimized serialization for Hadoop {@link Writable} types.
- */
-public class HadoopWritableSerialization implements HadoopSerialization {
-    /** */
-    private final Class<? extends Writable> cls;
-
-    /**
-     * @param cls Class.
-     */
-    public HadoopWritableSerialization(Class<? extends Writable> cls) {
-        assert cls != null;
-
-        this.cls = cls;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void write(DataOutput out, Object obj) throws IgniteCheckedException {
-        assert cls.isAssignableFrom(obj.getClass()) : cls + " " + obj.getClass();
-
-        try {
-            ((Writable)obj).write(out);
-        }
-        catch (IOException e) {
-            throw new IgniteCheckedException(e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public Object read(DataInput in, @Nullable Object obj) throws IgniteCheckedException {
-        Writable w = obj == null ? U.newInstance(cls) : cls.cast(obj);
-
-        try {
-            w.readFields(in);
-        }
-        catch (IOException e) {
-            throw new IgniteCheckedException(e);
-        }
-
-        return w;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void close() {
-        // No-op.
-    }
-}
\ No newline at end of file
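
The serialization class above delegates directly to Writable.write() and Writable.readFields(),
reusing the target object on read when one is supplied. A minimal round-trip illustrating that
underlying contract, assuming only the stock org.apache.hadoop.io.Text writable (the stream
setup is illustrative):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.Text;

    public class WritableRoundTrip {
        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();

            // write() serializes the object's fields to the DataOutput.
            new Text("hello").write(new DataOutputStream(bytes));

            // readFields() repopulates an existing instance, which is what enables object reuse.
            Text copy = new Text();

            copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

            System.out.println(copy); // Prints "hello".
        }
    }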

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider b/modules/hadoop/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider
deleted file mode 100644
index 8d5957b..0000000
--- a/modules/hadoop/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.ignite.hadoop.mapreduce.IgniteHadoopClientProtocolProvider
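
The single-line resource above is a standard java.util.ServiceLoader registration: Hadoop job
clients discover ClientProtocolProvider implementations on the class path through this
mechanism, which is how the Ignite provider gets picked up. A minimal sketch of the discovery
side:

    import java.util.ServiceLoader;
    import org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider;

    public class ListProtocolProviders {
        public static void main(String[] args) {
            // ServiceLoader reads every META-INF/services/<interface-name> resource on the
            // class path and instantiates each class listed inside.
            for (ClientProtocolProvider p : ServiceLoader.load(ClientProtocolProvider.class))
                System.out.println(p.getClass().getName());
        }
    }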

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolEmbeddedSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolEmbeddedSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolEmbeddedSelfTest.java
deleted file mode 100644
index 5a20a75..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolEmbeddedSelfTest.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.client.hadoop;
-
-import org.apache.ignite.configuration.HadoopConfiguration;
-
-/**
- * Hadoop client protocol tests in embedded process mode.
- */
-public class HadoopClientProtocolEmbeddedSelfTest extends HadoopClientProtocolSelfTest {
-    /** {@inheritDoc} */
-    @Override public HadoopConfiguration hadoopConfiguration(String gridName) {
-        HadoopConfiguration cfg = super.hadoopConfiguration(gridName);
-
-        // TODO: IGNITE-404: Uncomment when fixed.
-        //cfg.setExternalExecution(false);
-
-        return cfg;
-    }
-}
\ No newline at end of file


http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopIgfsSecondaryFileSystem.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopIgfsSecondaryFileSystem.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopIgfsSecondaryFileSystem.java
deleted file mode 100644
index 6b5c776..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopIgfsSecondaryFileSystem.java
+++ /dev/null
@@ -1,580 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.hadoop.fs;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathExistsException;
-import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.IgniteFileSystem;
-import org.apache.ignite.igfs.IgfsDirectoryNotEmptyException;
-import org.apache.ignite.igfs.IgfsException;
-import org.apache.ignite.igfs.IgfsFile;
-import org.apache.ignite.igfs.IgfsParentNotDirectoryException;
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.igfs.IgfsPathAlreadyExistsException;
-import org.apache.ignite.igfs.IgfsPathNotFoundException;
-import org.apache.ignite.igfs.IgfsUserContext;
-import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystemPositionedReadable;
-import org.apache.ignite.internal.processors.hadoop.HadoopPayloadAware;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsProperties;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsSecondaryFileSystemPositionedReadable;
-import org.apache.ignite.internal.processors.igfs.IgfsEntryInfo;
-import org.apache.ignite.internal.processors.igfs.IgfsFileImpl;
-import org.apache.ignite.internal.processors.igfs.IgfsSecondaryFileSystemV2;
-import org.apache.ignite.internal.processors.igfs.IgfsUtils;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.lang.IgniteOutClosure;
-import org.apache.ignite.lang.IgniteUuid;
-import org.apache.ignite.lifecycle.LifecycleAware;
-import org.jetbrains.annotations.Nullable;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.Callable;
-
-/**
- * Secondary file system which delegates calls to an instance of Hadoop {@link FileSystem}.
- * <p>
- * Target {@code FileSystem} instances are created on a per-user basis using the passed {@link HadoopFileSystemFactory}.
- */
-public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSystemV2, LifecycleAware,
-    HadoopPayloadAware {
-    /** The default user name. It is used if no user context is set. */
-    private String dfltUsrName;
-
-    /** Factory. */
-    private HadoopFileSystemFactory fsFactory;
-
-    /**
-     * Default constructor for Spring.
-     */
-    public IgniteHadoopIgfsSecondaryFileSystem() {
-        // No-op.
-    }
-
-    /**
-     * Simple constructor that is to be used by default.
-     *
-     * @param uri URI of file system.
-     * @throws IgniteCheckedException In case of error.
-     * @deprecated Use {@link #getFileSystemFactory()} instead.
-     */
-    @Deprecated
-    public IgniteHadoopIgfsSecondaryFileSystem(String uri) throws IgniteCheckedException {
-        this(uri, null, null);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param uri URI of file system.
-     * @param cfgPath Additional path to Hadoop configuration.
-     * @throws IgniteCheckedException In case of error.
-     * @deprecated Use {@link #getFileSystemFactory()} instead.
-     */
-    @Deprecated
-    public IgniteHadoopIgfsSecondaryFileSystem(@Nullable String uri, @Nullable String cfgPath)
-        throws IgniteCheckedException {
-        this(uri, cfgPath, null);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param uri URI of file system.
-     * @param cfgPath Additional path to Hadoop configuration.
-     * @param userName User name.
-     * @throws IgniteCheckedException In case of error.
-     * @deprecated Use {@link #getFileSystemFactory()} instead.
-     */
-    @Deprecated
-    public IgniteHadoopIgfsSecondaryFileSystem(@Nullable String uri, @Nullable String cfgPath,
-        @Nullable String userName) throws IgniteCheckedException {
-        setDefaultUserName(userName);
-
-        CachingHadoopFileSystemFactory fac = new CachingHadoopFileSystemFactory();
-
-        fac.setUri(uri);
-
-        if (cfgPath != null)
-            fac.setConfigPaths(cfgPath);
-
-        setFileSystemFactory(fac);
-    }
-
-    /**
-     * Gets default user name.
-     * <p>
-     * Defines user name which will be used during file system invocation in case no user name is defined explicitly
-     * through {@link FileSystem#get(URI, Configuration, String)}.
-     * <p>
-     * Also this name will be used if you manipulate {@link IgniteFileSystem} directly and do not set user name
-     * explicitly using {@link IgfsUserContext#doAs(String, IgniteOutClosure)} or
-     * {@link IgfsUserContext#doAs(String, Callable)} methods.
-     * <p>
-     * If not set, the value of the system property {@code "user.name"} will be used. If that property is not set
-     * either, {@code "anonymous"} will be used.
-     *
-     * @return Default user name.
-     */
-    @Nullable public String getDefaultUserName() {
-        return dfltUsrName;
-    }
-
-    /**
-     * Sets default user name. See {@link #getDefaultUserName()} for details.
-     *
-     * @param dfltUsrName Default user name.
-     */
-    public void setDefaultUserName(@Nullable String dfltUsrName) {
-        this.dfltUsrName = dfltUsrName;
-    }
-
-    /**
-     * Gets secondary file system factory.
-     * <p>
-     * This factory will be used whenever a call to a target {@link FileSystem} is required.
-     * <p>
-     * If not set, {@link CachingHadoopFileSystemFactory} will be used.
-     *
-     * @return Secondary file system factory.
-     */
-    public HadoopFileSystemFactory getFileSystemFactory() {
-        return fsFactory;
-    }
-
-    /**
-     * Sets secondary file system factory. See {@link #getFileSystemFactory()} for details.
-     *
-     * @param factory Secondary file system factory.
-     */
-    public void setFileSystemFactory(HadoopFileSystemFactory factory) {
-        this.fsFactory = factory;
-    }
-
-    /**
-     * Convert IGFS path into Hadoop path.
-     *
-     * @param path IGFS path.
-     * @return Hadoop path.
-     */
-    private Path convert(IgfsPath path) {
-        URI uri = fileSystemForUser().getUri();
-
-        return new Path(uri.getScheme(), uri.getAuthority(), path.toString());
-    }
-
-    /**
-     * Heuristically checks if exception was caused by invalid HDFS version and returns appropriate exception.
-     *
-     * @param e Exception to check.
-     * @param detailMsg Detailed error message.
-     * @return Appropriate exception.
-     */
-    private IgfsException handleSecondaryFsError(IOException e, String detailMsg) {
-        return cast(detailMsg, e);
-    }
-
-    /**
-     * Cast IO exception to IGFS exception.
-     *
-     * @param msg Error message.
-     * @param e IO exception.
-     * @return IGFS exception.
-     */
-    public static IgfsException cast(String msg, IOException e) {
-        if (e instanceof FileNotFoundException)
-            return new IgfsPathNotFoundException(e);
-        else if (e instanceof ParentNotDirectoryException)
-            return new IgfsParentNotDirectoryException(msg, e);
-        else if (e instanceof PathIsNotEmptyDirectoryException)
-            return new IgfsDirectoryNotEmptyException(e);
-        else if (e instanceof PathExistsException)
-            return new IgfsPathAlreadyExistsException(msg, e);
-        else
-            return new IgfsException(msg, e);
-    }
-
-    /**
-     * Convert Hadoop FileStatus properties to map.
-     *
-     * @param status File status.
-     * @return IGFS attributes.
-     */
-    private static Map<String, String> properties(FileStatus status) {
-        FsPermission perm = status.getPermission();
-
-        if (perm == null)
-            perm = FsPermission.getDefault();
-
-        HashMap<String, String> res = new HashMap<>(3);
-
-        res.put(IgfsUtils.PROP_PERMISSION, String.format("%04o", perm.toShort()));
-        res.put(IgfsUtils.PROP_USER_NAME, status.getOwner());
-        res.put(IgfsUtils.PROP_GROUP_NAME, status.getGroup());
-
-        return res;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean exists(IgfsPath path) {
-        try {
-            return fileSystemForUser().exists(convert(path));
-        }
-        catch (IOException e) {
-            throw handleSecondaryFsError(e, "Failed to check file existence [path=" + path + "]");
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Nullable @Override public IgfsFile update(IgfsPath path, Map<String, String> props) {
-        HadoopIgfsProperties props0 = new HadoopIgfsProperties(props);
-
-        final FileSystem fileSys = fileSystemForUser();
-
-        try {
-            if (props0.userName() != null || props0.groupName() != null)
-                fileSys.setOwner(convert(path), props0.userName(), props0.groupName());
-
-            if (props0.permission() != null)
-                fileSys.setPermission(convert(path), props0.permission());
-        }
-        catch (IOException e) {
-            throw handleSecondaryFsError(e, "Failed to update file properties [path=" + path + "]");
-        }
-
-        // Result is not used in case of secondary FS.
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void rename(IgfsPath src, IgfsPath dest) {
-        // Delegate to the secondary file system.
-        try {
-            if (!fileSystemForUser().rename(convert(src), convert(dest)))
-                throw new IgfsException("Failed to rename (secondary file system returned false) " +
-                    "[src=" + src + ", dest=" + dest + ']');
-        }
-        catch (IOException e) {
-            throw handleSecondaryFsError(e, "Failed to rename file [src=" + src + ", dest=" + dest + ']');
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean delete(IgfsPath path, boolean recursive) {
-        try {
-            return fileSystemForUser().delete(convert(path), recursive);
-        }
-        catch (IOException e) {
-            throw handleSecondaryFsError(e, "Failed to delete file [path=" + path + ", recursive=" + recursive + "]");
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void mkdirs(IgfsPath path) {
-        try {
-            if (!fileSystemForUser().mkdirs(convert(path)))
-                throw new IgniteException("Failed to make directories [path=" + path + "]");
-        }
-        catch (IOException e) {
-            throw handleSecondaryFsError(e, "Failed to make directories [path=" + path + "]");
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void mkdirs(IgfsPath path, @Nullable Map<String, String> props) {
-        try {
-            if (!fileSystemForUser().mkdirs(convert(path), new HadoopIgfsProperties(props).permission()))
-                throw new IgniteException("Failed to make directories [path=" + path + ", props=" + props + "]");
-        }
-        catch (IOException e) {
-            throw handleSecondaryFsError(e, "Failed to make directories [path=" + path + ", props=" + props + "]");
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public Collection<IgfsPath> listPaths(IgfsPath path) {
-        try {
-            FileStatus[] statuses = fileSystemForUser().listStatus(convert(path));
-
-            if (statuses == null)
-                throw new IgfsPathNotFoundException("Failed to list files (path not found): " + path);
-
-            Collection<IgfsPath> res = new ArrayList<>(statuses.length);
-
-            for (FileStatus status : statuses)
-                res.add(new IgfsPath(path, status.getPath().getName()));
-
-            return res;
-        }
-        catch (FileNotFoundException ignored) {
-            throw new IgfsPathNotFoundException("Failed to list files (path not found): " + path);
-        }
-        catch (IOException e) {
-            throw handleSecondaryFsError(e, "Failed to list statuses due to secondary file system exception: " + path);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public Collection<IgfsFile> listFiles(IgfsPath path) {
-        try {
-            FileStatus[] statuses = fileSystemForUser().listStatus(convert(path));
-
-            if (statuses == null)
-                throw new IgfsPathNotFoundException("Failed to list files (path not found): " + path);
-
-            Collection<IgfsFile> res = new ArrayList<>(statuses.length);
-
-            for (FileStatus s : statuses) {
-                IgfsEntryInfo fsInfo = s.isDirectory() ?
-                    IgfsUtils.createDirectory(
-                        IgniteUuid.randomUuid(),
-                        null,
-                        properties(s),
-                        s.getAccessTime(),
-                        s.getModificationTime()
-                    ) :
-                    IgfsUtils.createFile(
-                        IgniteUuid.randomUuid(),
-                        (int)s.getBlockSize(),
-                        s.getLen(),
-                        null,
-                        null,
-                        false,
-                        properties(s),
-                        s.getAccessTime(),
-                        s.getModificationTime()
-                    );
-
-                res.add(new IgfsFileImpl(new IgfsPath(path, s.getPath().getName()), fsInfo, 1));
-            }
-
-            return res;
-        }
-        catch (FileNotFoundException ignored) {
-            throw new IgfsPathNotFoundException("Failed to list files (path not found): " + path);
-        }
-        catch (IOException e) {
-            throw handleSecondaryFsError(e, "Failed to list statuses due to secondary file system exception: " + path);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsSecondaryFileSystemPositionedReadable open(IgfsPath path, int bufSize) {
-        return new HadoopIgfsSecondaryFileSystemPositionedReadable(fileSystemForUser(), convert(path), bufSize);
-    }
-
-    /** {@inheritDoc} */
-    @Override public OutputStream create(IgfsPath path, boolean overwrite) {
-        try {
-            return fileSystemForUser().create(convert(path), overwrite);
-        }
-        catch (IOException e) {
-            throw handleSecondaryFsError(e, "Failed to create file [path=" + path + ", overwrite=" + overwrite + "]");
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public OutputStream create(IgfsPath path, int bufSize, boolean overwrite, int replication,
-        long blockSize, @Nullable Map<String, String> props) {
-        HadoopIgfsProperties props0 =
-            new HadoopIgfsProperties(props != null ? props : Collections.<String, String>emptyMap());
-
-        try {
-            return fileSystemForUser().create(convert(path), props0.permission(), overwrite, bufSize,
-                (short) replication, blockSize, null);
-        }
-        catch (IOException e) {
-            throw handleSecondaryFsError(e, "Failed to create file [path=" + path + ", props=" + props +
-                ", overwrite=" + overwrite + ", bufSize=" + bufSize + ", replication=" + replication +
-                ", blockSize=" + blockSize + "]");
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public OutputStream append(IgfsPath path, int bufSize, boolean create,
-        @Nullable Map<String, String> props) {
-        try {
-            return fileSystemForUser().append(convert(path), bufSize);
-        }
-        catch (IOException e) {
-            throw handleSecondaryFsError(e, "Failed to append file [path=" + path + ", bufSize=" + bufSize + "]");
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsFile info(final IgfsPath path) {
-        try {
-            final FileStatus status = fileSystemForUser().getFileStatus(convert(path));
-
-            if (status == null)
-                return null;
-
-            final Map<String, String> props = properties(status);
-
-            return new IgfsFile() {
-                @Override public IgfsPath path() {
-                    return path;
-                }
-
-                @Override public boolean isFile() {
-                    return status.isFile();
-                }
-
-                @Override public boolean isDirectory() {
-                    return status.isDirectory();
-                }
-
-                @Override public int blockSize() {
-                    // By convention directory has blockSize == 0, while file has blockSize > 0:
-                    return isDirectory() ? 0 : (int)status.getBlockSize();
-                }
-
-                @Override public long groupBlockSize() {
-                    return status.getBlockSize();
-                }
-
-                @Override public long accessTime() {
-                    return status.getAccessTime();
-                }
-
-                @Override public long modificationTime() {
-                    return status.getModificationTime();
-                }
-
-                @Override public String property(String name) throws IllegalArgumentException {
-                    String val = props.get(name);
-
-                    if (val == null)
-                        throw new IllegalArgumentException("File property not found [path=" + path + ", name=" + name + ']');
-
-                    return val;
-                }
-
-                @Nullable @Override public String property(String name, @Nullable String dfltVal) {
-                    String val = props.get(name);
-
-                    return val == null ? dfltVal : val;
-                }
-
-                @Override public long length() {
-                    return status.getLen();
-                }
-
-                /** {@inheritDoc} */
-                @Override public Map<String, String> properties() {
-                    return props;
-                }
-            };
-        }
-        catch (FileNotFoundException ignore) {
-            return null;
-        }
-        catch (IOException e) {
-            throw handleSecondaryFsError(e, "Failed to get file status [path=" + path + "]");
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public long usedSpaceSize() {
-        try {
-            // We don't use FileSystem#getUsed() since it counts only the files
-            // in the filesystem root, not all the files recursively.
-            return fileSystemForUser().getContentSummary(new Path("/")).getSpaceConsumed();
-        }
-        catch (IOException e) {
-            throw handleSecondaryFsError(e, "Failed to get used space size of file system.");
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void setTimes(IgfsPath path, long accessTime, long modificationTime) throws IgniteException {
-        try {
-            fileSystemForUser().setTimes(convert(path), modificationTime, accessTime);
-        }
-        catch (IOException e) {
-            throw handleSecondaryFsError(e, "Failed set times for path: " + path);
-        }
-    }
-
-    /**
-     * Gets the underlying {@link FileSystem}.
-     * This method is used solely for testing.
-     * @return the underlying Hadoop {@link FileSystem}.
-     */
-    public FileSystem fileSystem() {
-        return fileSystemForUser();
-    }
-
-    /**
-     * Gets the FileSystem for the current context user.
-     * @return the FileSystem instance, never null.
-     */
-    private FileSystem fileSystemForUser() {
-        String user = IgfsUserContext.currentUser();
-
-        if (F.isEmpty(user))
-            user = IgfsUtils.fixUserName(dfltUsrName);
-
-        assert !F.isEmpty(user);
-
-        try {
-            return fsFactory.get(user);
-        }
-        catch (IOException ioe) {
-            throw new IgniteException(ioe);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void start() throws IgniteException {
-        dfltUsrName = IgfsUtils.fixUserName(dfltUsrName);
-
-        if (fsFactory == null)
-            fsFactory = new CachingHadoopFileSystemFactory();
-
-        if (fsFactory instanceof LifecycleAware)
-            ((LifecycleAware)fsFactory).start();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void stop() throws IgniteException {
-        if (fsFactory instanceof LifecycleAware)
-            ((LifecycleAware)fsFactory).stop();
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopFileSystemFactory getPayload() {
-        return fsFactory;
-    }
-}
\ No newline at end of file
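
The deprecated URI/config-path constructors above merely build a CachingHadoopFileSystemFactory
internally, so new code can wire the same thing explicitly through the factory property. A
minimal sketch of that setup (the HDFS URI, config path and user name are placeholders):

    import org.apache.ignite.hadoop.fs.CachingHadoopFileSystemFactory;
    import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;

    public class SecondaryFsSetup {
        public static IgniteHadoopIgfsSecondaryFileSystem create() {
            CachingHadoopFileSystemFactory factory = new CachingHadoopFileSystemFactory();

            factory.setUri("hdfs://namenode:9000");                   // Placeholder URI.
            factory.setConfigPaths("/etc/hadoop/conf/core-site.xml"); // Placeholder config path.

            IgniteHadoopIgfsSecondaryFileSystem secondaryFs = new IgniteHadoopIgfsSecondaryFileSystem();

            secondaryFs.setFileSystemFactory(factory);
            secondaryFs.setDefaultUserName("hdfs"); // Used when no IGFS user context is set.

            return secondaryFs;
        }
    }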

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/KerberosHadoopFileSystemFactory.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/KerberosHadoopFileSystemFactory.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/KerberosHadoopFileSystemFactory.java
deleted file mode 100644
index bbfbc59..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/KerberosHadoopFileSystemFactory.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.hadoop.fs;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.internal.A;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.jetbrains.annotations.Nullable;
-
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.security.PrivilegedExceptionAction;
-
-/**
- * Secure Hadoop file system factory that can work with an underlying file system protected with Kerberos.
- * It uses the "impersonation" mechanism to be able to work on behalf of an arbitrary client user.
- * Please see https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/Superusers.html for details.
- * The principal and the key tab name to be used for Kerberos authentication are set explicitly
- * in the factory configuration.
- *
- * <p>This factory does not cache any file system instances. If {@code "fs.[prefix].impl.disable.cache"} is set
- * to {@code true}, file system instances will be cached by Hadoop.
- */
-public class KerberosHadoopFileSystemFactory extends BasicHadoopFileSystemFactory {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** The default interval used to re-login from the key tab, in milliseconds. */
-    public static final long DFLT_RELOGIN_INTERVAL = 10 * 60 * 1000L;
-
-    /** Keytab full file name. */
-    private String keyTab;
-
-    /** Keytab principal. */
-    private String keyTabPrincipal;
-
-    /** The re-login interval. See {@link #getReloginInterval()} for more information. */
-    private long reloginInterval = DFLT_RELOGIN_INTERVAL;
-
-    /** Time of last re-login attempt, in system milliseconds. */
-    private transient volatile long lastReloginTime;
-
-    /**
-     * Constructor.
-     */
-    public KerberosHadoopFileSystemFactory() {
-        // No-op.
-    }
-
-    /** {@inheritDoc} */
-    @Override public FileSystem getWithMappedName(String name) throws IOException {
-        reloginIfNeeded();
-
-        return super.getWithMappedName(name);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected FileSystem create(String usrName) throws IOException, InterruptedException {
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser(usrName,
-            UserGroupInformation.getLoginUser());
-
-        return proxyUgi.doAs(new PrivilegedExceptionAction<FileSystem>() {
-            @Override public FileSystem run() throws Exception {
-                return FileSystem.get(fullUri, cfg);
-            }
-        });
-    }
-
-    /**
-     * Gets the key tab principal short name (e.g. "hdfs").
-     *
-     * @return The key tab principal.
-     */
-    @Nullable public String getKeyTabPrincipal() {
-        return keyTabPrincipal;
-    }
-
-    /**
-     * Sets the key tab principal name. See {@link #getKeyTabPrincipal()} for more information.
-     *
-     * @param keyTabPrincipal The key tab principal name.
-     */
-    public void setKeyTabPrincipal(@Nullable String keyTabPrincipal) {
-        this.keyTabPrincipal = keyTabPrincipal;
-    }
-
-    /**
-     * Gets the key tab full file name (e.g. "/etc/security/keytabs/hdfs.headless.keytab" or "/etc/krb5.keytab").
-     * <p>
-     * <b>NOTE!</b> The factory can be serialized and transferred to other machines where an instance of
-     * {@link IgniteHadoopFileSystem} resides. The corresponding path must exist on these machines as well.
-     *
-     * @return The key tab file name.
-     */
-    @Nullable public String getKeyTab() {
-        return keyTab;
-    }
-
-    /**
-     * Sets the key tab file name. See {@link #getKeyTab()} for more information.
-     *
-     * @param keyTab The key tab file name.
-     */
-    public void setKeyTab(@Nullable String keyTab) {
-        this.keyTab = keyTab;
-    }
-
-    /**
-     * The interval used to re-login from the key tab, in milliseconds.
-     * It is important that the value not be larger than the Kerberos ticket lifetime multiplied by 0.2, because
-     * the ticket renew window starts at {@code 0.8 * ticket lifetime}.
-     * The default ticket lifetime is 1 day (24 hours), so the default re-login interval (10 min)
-     * obeys this rule well.
-     *
-     * <p>Zero value means that re-login should be attempted on each file system operation.
-     * Negative values are not allowed.
-     *
-     * <p>Note, however, that it does not make sense to make this value small, because Hadoop does not allow a
-     * login if less than {@link org.apache.hadoop.security.UserGroupInformation#MIN_TIME_BEFORE_RELOGIN} milliseconds
-     * have passed since the time of the previous login.
-     * See {@link org.apache.hadoop.security.UserGroupInformation#hasSufficientTimeElapsed(long)} and its usages for
-     * more detail.
-     *
-     * @return The re-login interval, in milliseconds.
-     */
-    public long getReloginInterval() {
-        return reloginInterval;
-    }
-
-    /**
-     * Sets the relogin interval in milliseconds. See {@link #getReloginInterval()} for more information.
-     *
-     * @param reloginInterval The re-login interval, in milliseconds.
-     */
-    public void setReloginInterval(long reloginInterval) {
-        this.reloginInterval = reloginInterval;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void start() throws IgniteException {
-        A.ensure(!F.isEmpty(keyTab), "keyTab cannot be empty.");
-        A.ensure(!F.isEmpty(keyTabPrincipal), "keyTabPrincipal cannot be empty.");
-        A.ensure(reloginInterval >= 0, "reloginInterval cannot be negative.");
-
-        super.start();
-
-        try {
-            UserGroupInformation.setConfiguration(cfg);
-            UserGroupInformation.loginUserFromKeytab(keyTabPrincipal, keyTab);
-        }
-        catch (IOException ioe) {
-            throw new IgniteException("Failed login from keytab [keyTab=" + keyTab +
-                ", keyTabPrincipal=" + keyTabPrincipal + ']', ioe);
-        }
-    }
-
-    /**
-     * Re-logins the user if needed.
-     * First, the re-login interval defined in the factory is checked: re-login attempts happen no more than
-     * once per {@code reloginInterval}.
-     * Second, the {@link UserGroupInformation#checkTGTAndReloginFromKeytab()} method is invoked; it gets the
-     * existing TGT and checks its validity, and if the TGT is expired or close to expiry, performs a re-login.
-     *
-     * <p>This operation is expected to be called upon each operation with a file system created by the factory.
-     * As long as the {@link #get(String)} operation is invoked for each {@link IgniteHadoopFileSystem} operation,
-     * there is no need to invoke it separately.
-     *
-     * @throws IOException If login fails.
-     */
-    private void reloginIfNeeded() throws IOException {
-        long now = System.currentTimeMillis();
-
-        if (now >= lastReloginTime + reloginInterval) {
-            UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
-
-            lastReloginTime = now;
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-
-        U.writeString(out, keyTab);
-        U.writeString(out, keyTabPrincipal);
-        out.writeLong(reloginInterval);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        super.readExternal(in);
-
-        keyTab = U.readString(in);
-        keyTabPrincipal = U.readString(in);
-        reloginInterval = in.readLong();
-    }
-}
\ No newline at end of file
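
For completeness, a minimal sketch of wiring the Kerberos factory shown above (the URI, keytab
path and principal are placeholders; setUri() and setConfigPaths() are inherited from
BasicHadoopFileSystemFactory):

    import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
    import org.apache.ignite.hadoop.fs.KerberosHadoopFileSystemFactory;

    public class KerberosFsSetup {
        public static IgniteHadoopIgfsSecondaryFileSystem create() {
            KerberosHadoopFileSystemFactory factory = new KerberosHadoopFileSystemFactory();

            factory.setUri("hdfs://namenode:9000");      // Placeholder URI.
            factory.setKeyTab("/etc/krb5.keytab");       // Must exist on every node that may use the factory.
            factory.setKeyTabPrincipal("hdfs");          // Short principal name.
            factory.setReloginInterval(10 * 60 * 1000L); // Same as DFLT_RELOGIN_INTERVAL.

            IgniteHadoopIgfsSecondaryFileSystem secondaryFs = new IgniteHadoopIgfsSecondaryFileSystem();

            secondaryFs.setFileSystemFactory(factory);

            return secondaryFs;
        }
    }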

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/package-info.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/package-info.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/package-info.java
deleted file mode 100644
index 164801f..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * <!-- Package description. -->
- * Ignite Hadoop Accelerator file system API.
- */
-package org.apache.ignite.hadoop.fs;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v1/IgniteHadoopFileSystem.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v1/IgniteHadoopFileSystem.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v1/IgniteHadoopFileSystem.java
deleted file mode 100644
index a06129e..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v1/IgniteHadoopFileSystem.java
+++ /dev/null
@@ -1,1364 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.hadoop.fs.v1;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.InvalidPathException;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Progressable;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.configuration.FileSystemConfiguration;
-import org.apache.ignite.hadoop.fs.HadoopFileSystemFactory;
-import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
-import org.apache.ignite.igfs.IgfsBlockLocation;
-import org.apache.ignite.igfs.IgfsException;
-import org.apache.ignite.igfs.IgfsFile;
-import org.apache.ignite.igfs.IgfsMode;
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.igfs.IgfsPathSummary;
-import org.apache.ignite.internal.igfs.common.IgfsLogger;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsInputStream;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsOutputStream;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsProxyInputStream;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsProxyOutputStream;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsStreamDelegate;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsWrapper;
-import org.apache.ignite.internal.processors.igfs.IgfsHandshakeResponse;
-import org.apache.ignite.internal.processors.igfs.IgfsModeResolver;
-import org.apache.ignite.internal.processors.igfs.IgfsPaths;
-import org.apache.ignite.internal.processors.igfs.IgfsUtils;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.T2;
-import org.apache.ignite.internal.util.typedef.X;
-import org.apache.ignite.internal.util.typedef.internal.A;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lifecycle.LifecycleAware;
-import org.jetbrains.annotations.Nullable;
-
-import java.io.BufferedOutputStream;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import static org.apache.ignite.configuration.FileSystemConfiguration.DFLT_IGFS_LOG_BATCH_SIZE;
-import static org.apache.ignite.configuration.FileSystemConfiguration.DFLT_IGFS_LOG_DIR;
-import static org.apache.ignite.igfs.IgfsMode.PROXY;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_COLOCATED_WRITES;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_LOG_BATCH_SIZE;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_LOG_DIR;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_LOG_ENABLED;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_PREFER_LOCAL_WRITES;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH;
-import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.parameter;
-import static org.apache.ignite.internal.processors.igfs.IgfsEx.IGFS_SCHEME;
-
-/**
- * {@code IGFS} Hadoop 1.x file system driver over file system API. To use
- * {@code IGFS} as a Hadoop file system, you should configure this class
- * in Hadoop's {@code core-site.xml} as follows:
- * <pre name="code" class="xml">
- *  &lt;property&gt;
- *      &lt;name&gt;fs.default.name&lt;/name&gt;
- *      &lt;value&gt;igfs:///&lt;/value&gt;
- *  &lt;/property&gt;
- *
- *  &lt;property&gt;
- *      &lt;name&gt;fs.igfs.impl&lt;/name&gt;
- *      &lt;value&gt;org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem&lt;/value&gt;
- *  &lt;/property&gt;
- * </pre>
- * You should also add the Ignite JAR and all Ignite libraries to the Hadoop classpath. To
- * do this, add the following lines to the {@code conf/hadoop-env.sh} script in the Hadoop
- * distribution:
- * <pre name="code" class="bash">
- * export IGNITE_HOME=/path/to/Ignite/distribution
- * export HADOOP_CLASSPATH=$IGNITE_HOME/ignite*.jar
- *
- * for f in $IGNITE_HOME/libs/*.jar; do
- *  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f;
- * done
- * </pre>
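- * <p>
- * Once the configuration above is in place, IGFS is accessed through the standard
- * Hadoop {@code FileSystem} API; a minimal sketch (the {@code /sandbox} path is
- * purely illustrative):
- * <pre name="code" class="java">
- * Configuration conf = new Configuration();
- *
- * FileSystem fs = FileSystem.get(URI.create("igfs:///"), conf);
- *
- * fs.mkdirs(new Path("/sandbox"));
- * </pre>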
- * <h1 class="header">Data vs Clients Nodes</h1>
- * Hadoop needs to use its FileSystem remotely from client nodes as well as directly on
- * data nodes. Client nodes are responsible for basic file system operations as well as
- * accessing data nodes remotely. Usually, client nodes are started together
- * with {@code job-submitter} or {@code job-scheduler} processes, while data nodes are usually
- * started together with Hadoop {@code task-tracker} processes.
- * <p>
- * For sample client and data node configuration refer to {@code config/hadoop/default-config-client.xml}
- * and {@code config/hadoop/default-config.xml} configuration files in Ignite installation.
- */
-public class IgniteHadoopFileSystem extends FileSystem {
-    /** Internal property to indicate management connection. */
-    public static final String IGFS_MANAGEMENT = "fs.igfs.management.connection";
-
-    /** Empty array of file block locations. */
-    private static final BlockLocation[] EMPTY_BLOCK_LOCATIONS = new BlockLocation[0];
-
-    /** Empty array of file statuses. */
-    public static final FileStatus[] EMPTY_FILE_STATUS = new FileStatus[0];
-
-    /** Ensures that close routine is invoked at most once. */
-    private final AtomicBoolean closeGuard = new AtomicBoolean();
-
-    /** Grid remote client. */
-    private HadoopIgfsWrapper rmtClient;
-
-    /** Working directory. */
-    private Path workingDir;
-
-    /** Default replication factor. */
-    private short dfltReplication;
-
-    /** Base file system URI. */
-    @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
-    private URI uri;
-
-    /** Authority. */
-    private String uriAuthority;
-
-    /** Client logger. */
-    private IgfsLogger clientLog;
-
-    /** Secondary file system URI. */
-    private URI secondaryUri;
-
-    /** The user name this file system was created on behalf of. */
-    private String user;
-
-    /** IGFS mode resolver. */
-    private IgfsModeResolver modeRslvr;
-
-    /** The secondary file system factory. */
-    private HadoopFileSystemFactory factory;
-
-    /** Management connection flag. */
-    private boolean mgmt;
-
-    /** Whether a custom 'sequential reads before prefetch' value is provided. */
-    private boolean seqReadsBeforePrefetchOverride;
-
-    /** IGFS group block size. */
-    private long igfsGrpBlockSize;
-
-    /** Flag that controls whether file writes should be colocated. */
-    private boolean colocateFileWrites;
-
-    /** Prefer local writes. */
-    private boolean preferLocFileWrites;
-
-    /** Custom-provided sequential reads before prefetch. */
-    private int seqReadsBeforePrefetch;
-
-    /** {@inheritDoc} */
-    @Override public URI getUri() {
-        if (uri == null)
-            throw new IllegalStateException("URI is null (was IgniteHadoopFileSystem properly initialized?).");
-
-        return uri;
-    }
-
-    /**
-     * Enter busy state.
-     *
-     * @throws IOException If file system is stopped.
-     */
-    private void enterBusy() throws IOException {
-        if (closeGuard.get())
-            throw new IOException("File system is stopped.");
-    }
-
-    /**
-     * Leave busy state.
-     */
-    private void leaveBusy() {
-        // No-op.
-    }
-
-    /**
-     * Gets a non-null user name, as seen from the Hadoop file system viewpoint.
-     *
-     * @return The user name; never null.
-     * @throws IOException If the current user cannot be obtained.
-     */
-    public static String getFsHadoopUser() throws IOException {
-        UserGroupInformation currUgi = UserGroupInformation.getCurrentUser();
-
-        String user = currUgi.getShortUserName();
-
-        user = IgfsUtils.fixUserName(user);
-
-        assert user != null;
-
-        return user;
-    }
-
-    /**
-     * Public setter that can be used by direct users of FS or Visor.
-     *
-     * @param colocateFileWrites Whether all ongoing file writes should be colocated.
-     */
-    @SuppressWarnings("UnusedDeclaration")
-    public void colocateFileWrites(boolean colocateFileWrites) {
-        this.colocateFileWrites = colocateFileWrites;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("ConstantConditions")
-    @Override public void initialize(URI name, Configuration cfg) throws IOException {
-        enterBusy();
-
-        try {
-            if (rmtClient != null)
-                throw new IOException("File system is already initialized: " + rmtClient);
-
-            A.notNull(name, "name");
-            A.notNull(cfg, "cfg");
-
-            super.initialize(name, cfg);
-
-            setConf(cfg);
-
-            mgmt = cfg.getBoolean(IGFS_MANAGEMENT, false);
-
-            if (!IGFS_SCHEME.equals(name.getScheme()))
-                throw new IOException("Illegal file system URI [expected=" + IGFS_SCHEME +
-                    "://[name]/[optional_path], actual=" + name + ']');
-
-            uri = name;
-
-            uriAuthority = uri.getAuthority();
-
-            user = getFsHadoopUser();
-
-            // Override sequential reads before prefetch if needed.
-            seqReadsBeforePrefetch = parameter(cfg, PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, uriAuthority, 0);
-
-            if (seqReadsBeforePrefetch > 0)
-                seqReadsBeforePrefetchOverride = true;
-
-            // In Ignite the replication factor is controlled by data cache affinity.
-            // We use the replication factor to force the whole file to be stored on the local node.
-            dfltReplication = (short)cfg.getInt("dfs.replication", 3);
-
-            // Get file colocation control flag.
-            colocateFileWrites = parameter(cfg, PARAM_IGFS_COLOCATED_WRITES, uriAuthority, false);
-            preferLocFileWrites = cfg.getBoolean(PARAM_IGFS_PREFER_LOCAL_WRITES, false);
-
-            // Get log directory.
-            String logDirCfg = parameter(cfg, PARAM_IGFS_LOG_DIR, uriAuthority, DFLT_IGFS_LOG_DIR);
-
-            File logDirFile = U.resolveIgnitePath(logDirCfg);
-
-            String logDir = logDirFile != null ? logDirFile.getAbsolutePath() : null;
-
-            rmtClient = new HadoopIgfsWrapper(uriAuthority, logDir, cfg, LOG, user);
-
-            // Handshake.
-            IgfsHandshakeResponse handshake = rmtClient.handshake(logDir);
-
-            igfsGrpBlockSize = handshake.blockSize();
-
-            IgfsPaths paths = handshake.secondaryPaths();
-
-            // Initialize client logger.
-            Boolean logEnabled = parameter(cfg, PARAM_IGFS_LOG_ENABLED, uriAuthority, false);
-
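-            // The server-side sampling flag, when set, takes precedence over the client-side setting.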
-            if (handshake.sampling() != null ? handshake.sampling() : logEnabled) {
-                // Initiate client logger.
-                if (logDir == null)
-                    throw new IOException("Failed to resolve log directory: " + logDirCfg);
-
-                Integer batchSize = parameter(cfg, PARAM_IGFS_LOG_BATCH_SIZE, uriAuthority, DFLT_IGFS_LOG_BATCH_SIZE);
-
-                clientLog = IgfsLogger.logger(uriAuthority, handshake.igfsName(), logDir, batchSize);
-            }
-            else
-                clientLog = IgfsLogger.disabledLogger();
-
-            try {
-                modeRslvr = new IgfsModeResolver(paths.defaultMode(), paths.pathModes());
-            }
-            catch (IgniteCheckedException ice) {
-                throw new IOException(ice);
-            }
-
-            boolean initSecondary = paths.defaultMode() == PROXY;
-
-            if (!initSecondary && paths.pathModes() != null && !paths.pathModes().isEmpty()) {
-                for (T2<IgfsPath, IgfsMode> pathMode : paths.pathModes()) {
-                    IgfsMode mode = pathMode.getValue();
-
-                    if (mode == PROXY) {
-                        initSecondary = true;
-
-                        break;
-                    }
-                }
-            }
-
-            if (initSecondary) {
-                try {
-                    factory = (HadoopFileSystemFactory) paths.getPayload(getClass().getClassLoader());
-                }
-                catch (IgniteCheckedException e) {
-                    throw new IOException("Failed to get secondary file system factory.", e);
-                }
-
-                if (factory == null)
-                    throw new IOException("Failed to get secondary file system factory (did you set " +
-                        IgniteHadoopIgfsSecondaryFileSystem.class.getName() + " as \"secondaryFileSystem\" in " +
-                        FileSystemConfiguration.class.getName() + "?)");
-
-                if (factory instanceof LifecycleAware)
-                    ((LifecycleAware) factory).start();
-
-                try {
-                    FileSystem secFs = factory.get(user);
-
-                    secondaryUri = secFs.getUri();
-
-                    A.ensure(secondaryUri != null, "Secondary file system URI should not be null.");
-                }
-                catch (IOException e) {
-                    if (!mgmt)
-                        throw new IOException("Failed to connect to the secondary file system: " + secondaryUri, e);
-                    else
-                        LOG.warn("Visor failed to create secondary file system (operations on paths with PROXY mode " +
-                            "will have no effect): " + e.getMessage());
-                }
-            }
-
-            // Set the working directory to the home directory of the current FS user.
-            setWorkingDirectory(null);
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void checkPath(Path path) {
-        URI uri = path.toUri();
-
-        if (uri.isAbsolute()) {
-            if (!F.eq(uri.getScheme(), IGFS_SCHEME))
-                throw new InvalidPathException("Wrong path scheme [expected=" + IGFS_SCHEME + ", actual=" +
-                    uri.getScheme() + ']');
-
-            if (!F.eq(uri.getAuthority(), uriAuthority))
-                throw new InvalidPathException("Wrong path authority [expected=" + uriAuthority + ", actual=" +
-                    uri.getAuthority() + ']');
-        }
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("deprecation")
-    @Override public short getDefaultReplication() {
-        return dfltReplication;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void finalize() throws Throwable {
-        super.finalize();
-
-        close();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void close() throws IOException {
-        if (closeGuard.compareAndSet(false, true))
-            close0();
-    }
-
-    /**
-     * Closes file system.
-     *
-     * @throws IOException If failed.
-     */
-    private void close0() throws IOException {
-        if (LOG.isDebugEnabled())
-            LOG.debug("File system closed [uri=" + uri + ", endpoint=" + uriAuthority + ']');
-
-        if (rmtClient == null)
-            return;
-
-        super.close();
-
-        rmtClient.close(false);
-
-        if (clientLog.isLogEnabled())
-            clientLog.close();
-
-        if (factory instanceof LifecycleAware)
-            ((LifecycleAware) factory).stop();
-
-        // Reset initialized resources.
-        uri = null;
-        rmtClient = null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void setTimes(Path p, long mtime, long atime) throws IOException {
-        enterBusy();
-
-        try {
-            A.notNull(p, "p");
-
-            if (mode(p) == PROXY) {
-                final FileSystem secondaryFs = secondaryFileSystem();
-
-                if (secondaryFs == null) {
-                    assert mgmt;
-
-                    // No-op for management connection.
-                    return;
-                }
-
-                secondaryFs.setTimes(toSecondary(p), mtime, atime);
-            }
-            else {
-                IgfsPath path = convert(p);
-
-                rmtClient.setTimes(path, atime, mtime);
-            }
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void setPermission(Path p, FsPermission perm) throws IOException {
-        enterBusy();
-
-        try {
-            A.notNull(p, "p");
-
-            if (mode(p) == PROXY) {
-                final FileSystem secondaryFs = secondaryFileSystem();
-
-                if (secondaryFs == null) {
-                    assert mgmt;
-
-                    // No-op for management connection.
-                    return;
-                }
-
-                secondaryFs.setPermission(toSecondary(p), perm);
-            }
-            else if (rmtClient.update(convert(p), permission(perm)) == null) {
-                throw new IOException("Failed to set file permission (file not found?)" +
-                    " [path=" + p + ", perm=" + perm + ']');
-            }
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void setOwner(Path p, String username, String grpName) throws IOException {
-        A.notNull(p, "p");
-        A.notNull(username, "username");
-        A.notNull(grpName, "grpName");
-
-        enterBusy();
-
-        try {
-            if (mode(p) == PROXY) {
-                final FileSystem secondaryFs = secondaryFileSystem();
-
-                if (secondaryFs == null) {
-                    assert mgmt;
-
-                    // No-op for management connection.
-                    return;
-                }
-
-                secondaryFs.setOwner(toSecondary(p), username, grpName);
-            }
-            else if (rmtClient.update(convert(p), F.asMap(IgfsUtils.PROP_USER_NAME, username,
-                IgfsUtils.PROP_GROUP_NAME, grpName)) == null) {
-                throw new IOException("Failed to set file permission (file not found?)" +
-                    " [path=" + p + ", userName=" + username + ", groupName=" + grpName + ']');
-            }
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public FSDataInputStream open(Path f, int bufSize) throws IOException {
-        A.notNull(f, "f");
-
-        enterBusy();
-
-        try {
-            IgfsPath path = convert(f);
-            IgfsMode mode = mode(path);
-
-            if (mode == PROXY) {
-                final FileSystem secondaryFs = secondaryFileSystem();
-
-                if (secondaryFs == null) {
-                    assert mgmt;
-
-                    throw new IOException("Failed to open file (secondary file system is not initialized): " + f);
-                }
-
-                FSDataInputStream is = secondaryFs.open(toSecondary(f), bufSize);
-
-                if (clientLog.isLogEnabled()) {
-                    // At this point we do not know the file size, so we perform an additional request to the remote FS to get it.
-                    FileStatus status = secondaryFs.getFileStatus(toSecondary(f));
-
-                    long size = status != null ? status.getLen() : -1;
-
-                    long logId = IgfsLogger.nextId();
-
-                    clientLog.logOpen(logId, path, PROXY, bufSize, size);
-
-                    return new FSDataInputStream(new HadoopIgfsProxyInputStream(is, clientLog, logId));
-                }
-                else
-                    return is;
-            }
-            else {
-                HadoopIgfsStreamDelegate stream = seqReadsBeforePrefetchOverride ?
-                    rmtClient.open(path, seqReadsBeforePrefetch) : rmtClient.open(path);
-
-                long logId = -1;
-
-                if (clientLog.isLogEnabled()) {
-                    logId = IgfsLogger.nextId();
-
-                    clientLog.logOpen(logId, path, mode, bufSize, stream.length());
-                }
-
-                if (LOG.isDebugEnabled())
-                    LOG.debug("Opening input stream [thread=" + Thread.currentThread().getName() + ", path=" + path +
-                        ", bufSize=" + bufSize + ']');
-
-                HadoopIgfsInputStream igfsIn = new HadoopIgfsInputStream(stream, stream.length(),
-                    bufSize, LOG, clientLog, logId);
-
-                if (LOG.isDebugEnabled())
-                    LOG.debug("Opened input stream [path=" + path + ", delegate=" + stream + ']');
-
-                return new FSDataInputStream(igfsIn);
-            }
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("deprecation")
-    @Override public FSDataOutputStream create(Path f, final FsPermission perm, boolean overwrite, int bufSize,
-        short replication, long blockSize, Progressable progress) throws IOException {
-        A.notNull(f, "f");
-
-        enterBusy();
-
-        OutputStream out = null;
-
-        try {
-            IgfsPath path = convert(f);
-            IgfsMode mode = mode(path);
-
-            if (LOG.isDebugEnabled())
-                LOG.debug("Opening output stream in create [thread=" + Thread.currentThread().getName() + "path=" +
-                    path + ", overwrite=" + overwrite + ", bufSize=" + bufSize + ']');
-
-            if (mode == PROXY) {
-                final FileSystem secondaryFs = secondaryFileSystem();
-
-                if (secondaryFs == null) {
-                    assert mgmt;
-
-                    throw new IOException("Failed to create file (secondary file system is not initialized): " + f);
-                }
-
-                FSDataOutputStream os =
-                    secondaryFs.create(toSecondary(f), perm, overwrite, bufSize, replication, blockSize, progress);
-
-                if (clientLog.isLogEnabled()) {
-                    long logId = IgfsLogger.nextId();
-
-                    clientLog.logCreate(logId, path, PROXY, overwrite, bufSize, replication, blockSize);
-
-                    return new FSDataOutputStream(new HadoopIgfsProxyOutputStream(os, clientLog, logId));
-                }
-                else
-                    return os;
-            }
-            else {
-                Map<String,String> propMap = permission(perm);
-
-                propMap.put(IgfsUtils.PROP_PREFER_LOCAL_WRITES, Boolean.toString(preferLocFileWrites));
-
-                // Create the stream, and close it in the 'finally' section if any subsequent operation fails.
-                HadoopIgfsStreamDelegate stream = rmtClient.create(path, overwrite, colocateFileWrites,
-                    replication, blockSize, propMap);
-
-                assert stream != null;
-
-                long logId = -1;
-
-                if (clientLog.isLogEnabled()) {
-                    logId = IgfsLogger.nextId();
-
-                    clientLog.logCreate(logId, path, mode, overwrite, bufSize, replication, blockSize);
-                }
-
-                if (LOG.isDebugEnabled())
-                    LOG.debug("Opened output stream in create [path=" + path + ", delegate=" + stream + ']');
-
-                HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog,
-                    logId);
-
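-                // Buffer at least 64KB to reduce the number of writes to the underlying IGFS stream.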
-                bufSize = Math.max(64 * 1024, bufSize);
-
-                out = new BufferedOutputStream(igfsOut, bufSize);
-
-                FSDataOutputStream res = new FSDataOutputStream(out, null, 0);
-
-                // Mark stream created successfully.
-                out = null;
-
-                return res;
-            }
-        }
-        finally {
-            // Close if failed during stream creation.
-            if (out != null)
-                U.closeQuiet(out);
-
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("deprecation")
-    @Override public FSDataOutputStream append(Path f, int bufSize, Progressable progress) throws IOException {
-        A.notNull(f, "f");
-
-        enterBusy();
-
-        try {
-            IgfsPath path = convert(f);
-            IgfsMode mode = mode(path);
-
-            if (LOG.isDebugEnabled())
-                LOG.debug("Opening output stream in append [thread=" + Thread.currentThread().getName() +
-                    ", path=" + path + ", bufSize=" + bufSize + ']');
-
-            if (mode == PROXY) {
-                final FileSystem secondaryFs = secondaryFileSystem();
-
-                if (secondaryFs == null) {
-                    assert mgmt;
-
-                    throw new IOException("Failed to append file (secondary file system is not initialized): " + f);
-                }
-
-                FSDataOutputStream os = secondaryFs.append(toSecondary(f), bufSize, progress);
-
-                if (clientLog.isLogEnabled()) {
-                    long logId = IgfsLogger.nextId();
-
-                    clientLog.logAppend(logId, path, PROXY, bufSize); // Don't have stream ID.
-
-                    return new FSDataOutputStream(new HadoopIgfsProxyOutputStream(os, clientLog, logId));
-                }
-                else
-                    return os;
-            }
-            else {
-                HadoopIgfsStreamDelegate stream = rmtClient.append(path, false, null);
-
-                assert stream != null;
-
-                long logId = -1;
-
-                if (clientLog.isLogEnabled()) {
-                    logId = IgfsLogger.nextId();
-
-                    clientLog.logAppend(logId, path, mode, bufSize);
-                }
-
-                if (LOG.isDebugEnabled())
-                    LOG.debug("Opened output stream in append [path=" + path + ", delegate=" + stream + ']');
-
-                HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog,
-                    logId);
-
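-                // Buffer at least 64KB, as in create().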
-                bufSize = Math.max(64 * 1024, bufSize);
-
-                BufferedOutputStream out = new BufferedOutputStream(igfsOut, bufSize);
-
-                return new FSDataOutputStream(out, null, 0);
-            }
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override public boolean rename(Path src, Path dst) throws IOException {
-        A.notNull(src, "src");
-        A.notNull(dst, "dst");
-
-        enterBusy();
-
-        try {
-            IgfsPath srcPath = convert(src);
-            IgfsPath dstPath = convert(dst);
-            IgfsMode mode = mode(srcPath);
-
-            if (mode == PROXY) {
-                final FileSystem secondaryFs = secondaryFileSystem();
-
-                if (secondaryFs == null) {
-                    assert mgmt;
-
-                    return false;
-                }
-
-                if (clientLog.isLogEnabled())
-                    clientLog.logRename(srcPath, PROXY, dstPath);
-
-                return secondaryFs.rename(toSecondary(src), toSecondary(dst));
-            }
-            else {
-                if (clientLog.isLogEnabled())
-                    clientLog.logRename(srcPath, mode, dstPath);
-
-                try {
-                    rmtClient.rename(srcPath, dstPath);
-                }
-                catch (IOException ioe) {
-                    // Log the exception before rethrowing since it may be ignored:
-                    LOG.warn("Failed to rename [srcPath=" + srcPath + ", dstPath=" + dstPath + ", mode=" + mode + ']',
-                        ioe);
-
-                    throw ioe;
-                }
-
-                return true;
-            }
-        }
-        catch (IOException e) {
-            // Intentionally ignore IGFS exceptions here to follow Hadoop contract.
-            if (F.eq(IOException.class, e.getClass()) && (e.getCause() == null ||
-                !X.hasCause(e.getCause(), IgfsException.class)))
-                throw e;
-            else
-                return false;
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("deprecation")
-    @Override public boolean delete(Path f) throws IOException {
-        return delete(f, false);
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override public boolean delete(Path f, boolean recursive) throws IOException {
-        A.notNull(f, "f");
-
-        enterBusy();
-
-        try {
-            IgfsPath path = convert(f);
-            IgfsMode mode = mode(path);
-
-            if (mode == PROXY) {
-                final FileSystem secondaryFs = secondaryFileSystem();
-
-                if (secondaryFs == null) {
-                    assert mgmt;
-
-                    return false;
-                }
-
-                if (clientLog.isLogEnabled())
-                    clientLog.logDelete(path, PROXY, recursive);
-
-                return secondaryFs.delete(toSecondary(f), recursive);
-            }
-            else {
-                // Will throw exception if delete failed.
-                boolean res = rmtClient.delete(path, recursive);
-
-                if (clientLog.isLogEnabled())
-                    clientLog.logDelete(path, mode, recursive);
-
-                return res;
-            }
-        }
-        catch (IOException e) {
-            // Intentionally ignore IGFS exceptions here to follow Hadoop contract.
-            if (F.eq(IOException.class, e.getClass()) && (e.getCause() == null ||
-                !X.hasCause(e.getCause(), IgfsException.class)))
-                throw e;
-            else
-                return false;
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public FileStatus[] listStatus(Path f) throws IOException {
-        A.notNull(f, "f");
-
-        enterBusy();
-
-        try {
-            IgfsPath path = convert(f);
-            IgfsMode mode = mode(path);
-
-            if (mode == PROXY) {
-                final FileSystem secondaryFs = secondaryFileSystem();
-
-                if (secondaryFs == null) {
-                    assert mgmt;
-
-                    return EMPTY_FILE_STATUS;
-                }
-
-                FileStatus[] arr = secondaryFs.listStatus(toSecondary(f));
-
-                if (arr == null)
-                    throw new FileNotFoundException("File " + f + " does not exist.");
-
-                for (int i = 0; i < arr.length; i++)
-                    arr[i] = toPrimary(arr[i]);
-
-                if (clientLog.isLogEnabled()) {
-                    String[] fileArr = new String[arr.length];
-
-                    for (int i = 0; i < arr.length; i++)
-                        fileArr[i] = arr[i].getPath().toString();
-
-                    clientLog.logListDirectory(path, PROXY, fileArr);
-                }
-
-                return arr;
-            }
-            else {
-                Collection<IgfsFile> list = rmtClient.listFiles(path);
-
-                if (list == null)
-                    throw new FileNotFoundException("File " + f + " does not exist.");
-
-                List<IgfsFile> files = new ArrayList<>(list);
-
-                FileStatus[] arr = new FileStatus[files.size()];
-
-                for (int i = 0; i < arr.length; i++)
-                    arr[i] = convert(files.get(i));
-
-                if (clientLog.isLogEnabled()) {
-                    String[] fileArr = new String[arr.length];
-
-                    for (int i = 0; i < arr.length; i++)
-                        fileArr[i] = arr[i].getPath().toString();
-
-                    clientLog.logListDirectory(path, mode, fileArr);
-                }
-
-                return arr;
-            }
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public Path getHomeDirectory() {
-        Path path = new Path("/user/" + user);
-
-        return path.makeQualified(getUri(), null);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void setWorkingDirectory(Path newPath) {
-        try {
-            if (newPath == null) {
-                Path homeDir = getHomeDirectory();
-
-                FileSystem secondaryFs = secondaryFileSystem();
-
-                if (secondaryFs != null)
-                    secondaryFs.setWorkingDirectory(toSecondary(homeDir));
-
-                workingDir = homeDir;
-            }
-            else {
-                Path fixedNewPath = fixRelativePart(newPath);
-
-                String res = fixedNewPath.toUri().getPath();
-
-                if (!DFSUtil.isValidName(res))
-                    throw new IllegalArgumentException("Invalid DFS directory name " + res);
-
-                FileSystem secondaryFs = secondaryFileSystem();
-
-                if (secondaryFs != null)
-                    secondaryFs.setWorkingDirectory(toSecondary(fixedNewPath));
-
-                workingDir = fixedNewPath;
-            }
-        }
-        catch (IOException e) {
-            throw new RuntimeException("Failed to obtain secondary file system instance.", e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public Path getWorkingDirectory() {
-        return workingDir;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override public boolean mkdirs(Path f, FsPermission perm) throws IOException {
-        A.notNull(f, "f");
-
-        enterBusy();
-
-        try {
-            IgfsPath path = convert(f);
-            IgfsMode mode = mode(path);
-
-            if (mode == PROXY) {
-                final FileSystem secondaryFs = secondaryFileSystem();
-
-                if (secondaryFs == null) {
-                    assert mgmt;
-
-                    return false;
-                }
-
-                if (clientLog.isLogEnabled())
-                    clientLog.logMakeDirectory(path, PROXY);
-
-                return secondaryFs.mkdirs(toSecondary(f), perm);
-            }
-            else {
-                boolean mkdirRes = rmtClient.mkdirs(path, permission(perm));
-
-                if (clientLog.isLogEnabled())
-                    clientLog.logMakeDirectory(path, mode);
-
-                return mkdirRes;
-            }
-        }
-        catch (IOException e) {
-            // Intentionally ignore IGFS exceptions here to follow Hadoop contract.
-            if (F.eq(IOException.class, e.getClass()) && (e.getCause() == null ||
-                !X.hasCause(e.getCause(), IgfsException.class)))
-                throw e;
-            else
-                return false;
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public FileStatus getFileStatus(Path f) throws IOException {
-        A.notNull(f, "f");
-
-        enterBusy();
-
-        try {
-            if (mode(f) == PROXY) {
-                final FileSystem secondaryFs = secondaryFileSystem();
-
-                if (secondaryFs == null) {
-                    assert mgmt;
-
-                    throw new IOException("Failed to get file status (secondary file system is not initialized): " + f);
-                }
-
-                return toPrimary(secondaryFs.getFileStatus(toSecondary(f)));
-            }
-            else {
-                IgfsFile info = rmtClient.info(convert(f));
-
-                if (info == null)
-                    throw new FileNotFoundException("File not found: " + f);
-
-                return convert(info);
-            }
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public ContentSummary getContentSummary(Path f) throws IOException {
-        A.notNull(f, "f");
-
-        enterBusy();
-
-        try {
-            if (mode(f) == PROXY) {
-                final FileSystem secondaryFs = secondaryFileSystem();
-
-                if (secondaryFs == null) {
-                    assert mgmt;
-
-                    throw new IOException("Failed to get content summary (secondary file system is not initialized): " +
-                        f);
-                }
-
-                return secondaryFs.getContentSummary(toSecondary(f));
-            }
-            else {
-                IgfsPathSummary sum = rmtClient.contentSummary(convert(f));
-
-                return new ContentSummary(sum.totalLength(), sum.filesCount(), sum.directoriesCount(),
-                    -1, sum.totalLength(), rmtClient.fsStatus().spaceTotal());
-            }
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public BlockLocation[] getFileBlockLocations(FileStatus status, long start, long len) throws IOException {
-        A.notNull(status, "status");
-
-        enterBusy();
-
-        try {
-            IgfsPath path = convert(status.getPath());
-
-            if (mode(status.getPath()) == PROXY) {
-                final FileSystem secondaryFs = secondaryFileSystem();
-
-                if (secondaryFs == null) {
-                    assert mgmt;
-
-                    return EMPTY_BLOCK_LOCATIONS;
-                }
-
-                Path secPath = toSecondary(status.getPath());
-
-                return secondaryFs.getFileBlockLocations(secondaryFs.getFileStatus(secPath), start, len);
-            }
-            else {
-                long now = System.currentTimeMillis();
-
-                List<IgfsBlockLocation> affinity = new ArrayList<>(rmtClient.affinity(path, start, len));
-
-                BlockLocation[] arr = new BlockLocation[affinity.size()];
-
-                for (int i = 0; i < arr.length; i++)
-                    arr[i] = convert(affinity.get(i));
-
-                if (LOG.isDebugEnabled())
-                    LOG.debug("Fetched file locations [path=" + path + ", fetchTime=" +
-                        (System.currentTimeMillis() - now) + ", locations=" + Arrays.asList(arr) + ']');
-
-                return arr;
-            }
-        }
-        catch (FileNotFoundException ignored) {
-            return EMPTY_BLOCK_LOCATIONS;
-        }
-        finally {
-            leaveBusy();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("deprecation")
-    @Override public long getDefaultBlockSize() {
-        return igfsGrpBlockSize;
-    }
-
-    /**
-     * Resolve path mode.
-     *
-     * @param path Hadoop path.
-     * @return Path mode.
-     */
-    public IgfsMode mode(Path path) {
-        return mode(convert(path));
-    }
-
-    /**
-     * Resolve path mode.
-     *
-     * @param path IGFS path.
-     * @return Path mode.
-     */
-    public IgfsMode mode(IgfsPath path) {
-        return modeRslvr.resolveMode(path);
-    }
-
-    /**
-     * @return {@code true} if the secondary file system is initialized.
-     */
-    public boolean hasSecondaryFileSystem() {
-        return factory != null;
-    }
-
-    /**
-     * Convert the given path to a path acceptable by the primary file system.
-     *
-     * @param path Path.
-     * @return Primary file system path.
-     */
-    private Path toPrimary(Path path) {
-        return convertPath(path, uri);
-    }
-
-    /**
-     * Convert the given path to a path acceptable by the secondary file system.
-     *
-     * @param path Path.
-     * @return Secondary file system path.
-     */
-    private Path toSecondary(Path path) {
-        assert factory != null;
-        assert secondaryUri != null;
-
-        return convertPath(path, secondaryUri);
-    }
-
-    /**
-     * Convert path using the given new URI.
-     *
-     * @param path Old path.
-     * @param newUri New URI.
-     * @return New path.
-     */
-    private Path convertPath(Path path, URI newUri) {
-        assert newUri != null;
-
-        if (path != null) {
-            URI pathUri = path.toUri();
-
-            try {
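-                // Substitute the new scheme and authority only where the original path defined them.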
-                return new Path(new URI(pathUri.getScheme() != null ? newUri.getScheme() : null,
-                    pathUri.getAuthority() != null ? newUri.getAuthority() : null, pathUri.getPath(), null, null));
-            }
-            catch (URISyntaxException e) {
-                throw new IgniteException("Failed to construct secondary file system path from the primary file " +
-                    "system path: " + path, e);
-            }
-        }
-        else
-            return null;
-    }
-
-    /**
-     * Convert a file status obtained from the secondary file system to a status of the primary file system.
-     *
-     * @param status Secondary file system status.
-     * @return Primary file system status.
-     */
-    @SuppressWarnings("deprecation")
-    private FileStatus toPrimary(FileStatus status) {
-        return status != null ? new FileStatus(status.getLen(), status.isDir(), status.getReplication(),
-            status.getBlockSize(), status.getModificationTime(), status.getAccessTime(), status.getPermission(),
-            status.getOwner(), status.getGroup(), toPrimary(status.getPath())) : null;
-    }
-
-    /**
-     * Convert IGFS path into Hadoop path.
-     *
-     * @param path IGFS path.
-     * @return Hadoop path.
-     */
-    private Path convert(IgfsPath path) {
-        return new Path(IGFS_SCHEME, uriAuthority, path.toString());
-    }
-
-    /**
-     * Convert Hadoop path into IGFS path.
-     *
-     * @param path Hadoop path.
-     * @return IGFS path.
-     */
-    @Nullable private IgfsPath convert(@Nullable Path path) {
-        if (path == null)
-            return null;
-
-        return path.isAbsolute() ? new IgfsPath(path.toUri().getPath()) :
-            new IgfsPath(convert(workingDir), path.toUri().getPath());
-    }
-
-    /**
-     * Convert IGFS affinity block location into Hadoop affinity block location.
-     *
-     * @param block IGFS affinity block location.
-     * @return Hadoop affinity block location.
-     */
-    private BlockLocation convert(IgfsBlockLocation block) {
-        Collection<String> names = block.names();
-        Collection<String> hosts = block.hosts();
-
-        return new BlockLocation(
-            names.toArray(new String[names.size()]) /* hostname:portNumber of data nodes */,
-            hosts.toArray(new String[hosts.size()]) /* hostnames of data nodes */,
-            block.start(), block.length()
-        ) {
-            @Override public String toString() {
-                try {
-                    return "BlockLocation [offset=" + getOffset() + ", length=" + getLength() +
-                        ", hosts=" + Arrays.asList(getHosts()) + ", names=" + Arrays.asList(getNames()) + ']';
-                }
-                catch (IOException e) {
-                    throw new RuntimeException(e);
-                }
-            }
-        };
-    }
-
-    /**
-     * Convert IGFS file information into Hadoop file status.
-     *
-     * @param file IGFS file information.
-     * @return Hadoop file status.
-     */
-    @SuppressWarnings("deprecation")
-    private FileStatus convert(IgfsFile file) {
-        return new FileStatus(
-            file.length(),
-            file.isDirectory(),
-            getDefaultReplication(),
-            file.groupBlockSize(),
-            file.modificationTime(),
-            file.accessTime(),
-            permission(file),
-            file.property(IgfsUtils.PROP_USER_NAME, user),
-            file.property(IgfsUtils.PROP_GROUP_NAME, "users"),
-            convert(file.path())) {
-            @Override public String toString() {
-                return "FileStatus [path=" + getPath() + ", isDir=" + isDir() + ", len=" + getLen() +
-                    ", mtime=" + getModificationTime() + ", atime=" + getAccessTime() + ']';
-            }
-        };
-    }
-
-    /**
-     * Convert Hadoop permission into IGFS file attribute.
-     *
-     * @param perm Hadoop permission.
-     * @return IGFS attributes.
-     */
-    private Map<String, String> permission(FsPermission perm) {
-        if (perm == null)
-            perm = FsPermission.getDefault();
-
-        return F.asMap(IgfsUtils.PROP_PERMISSION, toString(perm));
-    }
-
-    /**
-     * @param perm Permission.
-     * @return String.
-     */
-    private static String toString(FsPermission perm) {
-        return String.format("%04o", perm.toShort());
-    }
-
-    /**
-     * Convert IGFS file attributes into Hadoop permission.
-     *
-     * @param file File info.
-     * @return Hadoop permission.
-     */
-    private FsPermission permission(IgfsFile file) {
-        String perm = file.property(IgfsUtils.PROP_PERMISSION, null);
-
-        if (perm == null)
-            return FsPermission.getDefault();
-
-        try {
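-            // The permission property is stored as an octal string (see toString(FsPermission)).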
-            return new FsPermission((short)Integer.parseInt(perm, 8));
-        }
-        catch (NumberFormatException ignore) {
-            return FsPermission.getDefault();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(IgniteHadoopFileSystem.class, this);
-    }
-
-    /**
-     * Returns the user name this file system was created on behalf of.
-     *
-     * @return The user name.
-     */
-    public String user() {
-        return user;
-    }
-
-    /**
-     * Gets cached or creates a {@link FileSystem}.
-     *
-     * @return The secondary file system.
-     */
-    private @Nullable FileSystem secondaryFileSystem() throws IOException {
-        if (factory == null)
-            return null;
-
-        return factory.get(user);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v1/package-info.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v1/package-info.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v1/package-info.java
deleted file mode 100644
index 60e62ca..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v1/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * <!-- Package description. -->
- * Contains the Ignite Hadoop 1.x <code>FileSystem</code> implementation.
- */
-package org.apache.ignite.hadoop.fs.v1;
\ No newline at end of file


[28/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/books/alice-in-wonderland.txt
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/books/alice-in-wonderland.txt b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/books/alice-in-wonderland.txt
new file mode 100644
index 0000000..d65883a
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/books/alice-in-wonderland.txt
@@ -0,0 +1,3735 @@
+Project Gutenberg's Alice's Adventures in Wonderland, by Lewis Carroll
+
+This eBook is for the use of anyone anywhere at no cost and with
+almost no restrictions whatsoever.  You may copy it, give it away or
+re-use it under the terms of the Project Gutenberg License included
+with this eBook or online at www.gutenberg.org
+
+
+Title: Alice's Adventures in Wonderland
+
+Author: Lewis Carroll
+
+Posting Date: June 25, 2008 [EBook #11]
+Release Date: March, 1994
+[Last updated: December 20, 2011]
+
+Language: English
+
+
+*** START OF THIS PROJECT GUTENBERG EBOOK ALICE'S ADVENTURES IN WONDERLAND ***
+
+
+
+
+
+
+
+
+
+
+ALICE'S ADVENTURES IN WONDERLAND
+
+Lewis Carroll
+
+THE MILLENNIUM FULCRUM EDITION 3.0
+
+
+
+
+CHAPTER I. Down the Rabbit-Hole
+
+Alice was beginning to get very tired of sitting by her sister on the
+bank, and of having nothing to do: once or twice she had peeped into the
+book her sister was reading, but it had no pictures or conversations in
+it, 'and what is the use of a book,' thought Alice 'without pictures or
+conversation?'
+
+So she was considering in her own mind (as well as she could, for the
+hot day made her feel very sleepy and stupid), whether the pleasure
+of making a daisy-chain would be worth the trouble of getting up and
+picking the daisies, when suddenly a White Rabbit with pink eyes ran
+close by her.
+
+There was nothing so VERY remarkable in that; nor did Alice think it so
+VERY much out of the way to hear the Rabbit say to itself, 'Oh dear!
+Oh dear! I shall be late!' (when she thought it over afterwards, it
+occurred to her that she ought to have wondered at this, but at the time
+it all seemed quite natural); but when the Rabbit actually TOOK A WATCH
+OUT OF ITS WAISTCOAT-POCKET, and looked at it, and then hurried on,
+Alice started to her feet, for it flashed across her mind that she had
+never before seen a rabbit with either a waistcoat-pocket, or a watch
+to take out of it, and burning with curiosity, she ran across the field
+after it, and fortunately was just in time to see it pop down a large
+rabbit-hole under the hedge.
+
+In another moment down went Alice after it, never once considering how
+in the world she was to get out again.
+
+The rabbit-hole went straight on like a tunnel for some way, and then
+dipped suddenly down, so suddenly that Alice had not a moment to think
+about stopping herself before she found herself falling down a very deep
+well.
+
+Either the well was very deep, or she fell very slowly, for she had
+plenty of time as she went down to look about her and to wonder what was
+going to happen next. First, she tried to look down and make out what
+she was coming to, but it was too dark to see anything; then she
+looked at the sides of the well, and noticed that they were filled with
+cupboards and book-shelves; here and there she saw maps and pictures
+hung upon pegs. She took down a jar from one of the shelves as
+she passed; it was labelled 'ORANGE MARMALADE', but to her great
+disappointment it was empty: she did not like to drop the jar for fear
+of killing somebody, so managed to put it into one of the cupboards as
+she fell past it.
+
+'Well!' thought Alice to herself, 'after such a fall as this, I shall
+think nothing of tumbling down stairs! How brave they'll all think me at
+home! Why, I wouldn't say anything about it, even if I fell off the top
+of the house!' (Which was very likely true.)
+
+Down, down, down. Would the fall NEVER come to an end! 'I wonder how
+many miles I've fallen by this time?' she said aloud. 'I must be getting
+somewhere near the centre of the earth. Let me see: that would be four
+thousand miles down, I think--' (for, you see, Alice had learnt several
+things of this sort in her lessons in the schoolroom, and though this
+was not a VERY good opportunity for showing off her knowledge, as there
+was no one to listen to her, still it was good practice to say it over)
+'--yes, that's about the right distance--but then I wonder what Latitude
+or Longitude I've got to?' (Alice had no idea what Latitude was, or
+Longitude either, but thought they were nice grand words to say.)
+
+Presently she began again. 'I wonder if I shall fall right THROUGH the
+earth! How funny it'll seem to come out among the people that walk with
+their heads downward! The Antipathies, I think--' (she was rather glad
+there WAS no one listening, this time, as it didn't sound at all the
+right word) '--but I shall have to ask them what the name of the country
+is, you know. Please, Ma'am, is this New Zealand or Australia?' (and
+she tried to curtsey as she spoke--fancy CURTSEYING as you're falling
+through the air! Do you think you could manage it?) 'And what an
+ignorant little girl she'll think me for asking! No, it'll never do to
+ask: perhaps I shall see it written up somewhere.'
+
+Down, down, down. There was nothing else to do, so Alice soon began
+talking again. 'Dinah'll miss me very much to-night, I should think!'
+(Dinah was the cat.) 'I hope they'll remember her saucer of milk at
+tea-time. Dinah my dear! I wish you were down here with me! There are no
+mice in the air, I'm afraid, but you might catch a bat, and that's very
+like a mouse, you know. But do cats eat bats, I wonder?' And here Alice
+began to get rather sleepy, and went on saying to herself, in a dreamy
+sort of way, 'Do cats eat bats? Do cats eat bats?' and sometimes, 'Do
+bats eat cats?' for, you see, as she couldn't answer either question,
+it didn't much matter which way she put it. She felt that she was dozing
+off, and had just begun to dream that she was walking hand in hand with
+Dinah, and saying to her very earnestly, 'Now, Dinah, tell me the truth:
+did you ever eat a bat?' when suddenly, thump! thump! down she came upon
+a heap of sticks and dry leaves, and the fall was over.
+
+Alice was not a bit hurt, and she jumped up on to her feet in a moment:
+she looked up, but it was all dark overhead; before her was another
+long passage, and the White Rabbit was still in sight, hurrying down it.
+There was not a moment to be lost: away went Alice like the wind, and
+was just in time to hear it say, as it turned a corner, 'Oh my ears
+and whiskers, how late it's getting!' She was close behind it when she
+turned the corner, but the Rabbit was no longer to be seen: she found
+herself in a long, low hall, which was lit up by a row of lamps hanging
+from the roof.
+
+There were doors all round the hall, but they were all locked; and when
+Alice had been all the way down one side and up the other, trying every
+door, she walked sadly down the middle, wondering how she was ever to
+get out again.
+
+Suddenly she came upon a little three-legged table, all made of solid
+glass; there was nothing on it except a tiny golden key, and Alice's
+first thought was that it might belong to one of the doors of the hall;
+but, alas! either the locks were too large, or the key was too small,
+but at any rate it would not open any of them. However, on the second
+time round, she came upon a low curtain she had not noticed before, and
+behind it was a little door about fifteen inches high: she tried the
+little golden key in the lock, and to her great delight it fitted!
+
+Alice opened the door and found that it led into a small passage, not
+much larger than a rat-hole: she knelt down and looked along the passage
+into the loveliest garden you ever saw. How she longed to get out of
+that dark hall, and wander about among those beds of bright flowers and
+those cool fountains, but she could not even get her head through the
+doorway; 'and even if my head would go through,' thought poor Alice, 'it
+would be of very little use without my shoulders. Oh, how I wish I could
+shut up like a telescope! I think I could, if I only know how to begin.'
+For, you see, so many out-of-the-way things had happened lately,
+that Alice had begun to think that very few things indeed were really
+impossible.
+
+There seemed to be no use in waiting by the little door, so she went
+back to the table, half hoping she might find another key on it, or at
+any rate a book of rules for shutting people up like telescopes: this
+time she found a little bottle on it, ('which certainly was not here
+before,' said Alice,) and round the neck of the bottle was a paper
+label, with the words 'DRINK ME' beautifully printed on it in large
+letters.
+
+It was all very well to say 'Drink me,' but the wise little Alice was
+not going to do THAT in a hurry. 'No, I'll look first,' she said, 'and
+see whether it's marked "poison" or not'; for she had read several nice
+little histories about children who had got burnt, and eaten up by wild
+beasts and other unpleasant things, all because they WOULD not remember
+the simple rules their friends had taught them: such as, that a red-hot
+poker will burn you if you hold it too long; and that if you cut your
+finger VERY deeply with a knife, it usually bleeds; and she had never
+forgotten that, if you drink much from a bottle marked 'poison,' it is
+almost certain to disagree with you, sooner or later.
+
+However, this bottle was NOT marked 'poison,' so Alice ventured to taste
+it, and finding it very nice, (it had, in fact, a sort of mixed flavour
+of cherry-tart, custard, pine-apple, roast turkey, toffee, and hot
+buttered toast,) she very soon finished it off.
+
+  *    *    *    *    *    *    *
+
+    *    *    *    *    *    *
+
+  *    *    *    *    *    *    *
+
+'What a curious feeling!' said Alice; 'I must be shutting up like a
+telescope.'
+
+And so it was indeed: she was now only ten inches high, and her face
+brightened up at the thought that she was now the right size for going
+through the little door into that lovely garden. First, however, she
+waited for a few minutes to see if she was going to shrink any further:
+she felt a little nervous about this; 'for it might end, you know,' said
+Alice to herself, 'in my going out altogether, like a candle. I wonder
+what I should be like then?' And she tried to fancy what the flame of a
+candle is like after the candle is blown out, for she could not remember
+ever having seen such a thing.
+
+After a while, finding that nothing more happened, she decided on going
+into the garden at once; but, alas for poor Alice! when she got to the
+door, she found she had forgotten the little golden key, and when she
+went back to the table for it, she found she could not possibly reach
+it: she could see it quite plainly through the glass, and she tried her
+best to climb up one of the legs of the table, but it was too slippery;
+and when she had tired herself out with trying, the poor little thing
+sat down and cried.
+
+'Come, there's no use in crying like that!' said Alice to herself,
+rather sharply; 'I advise you to leave off this minute!' She generally
+gave herself very good advice, (though she very seldom followed it),
+and sometimes she scolded herself so severely as to bring tears into
+her eyes; and once she remembered trying to box her own ears for having
+cheated herself in a game of croquet she was playing against herself,
+for this curious child was very fond of pretending to be two people.
+'But it's no use now,' thought poor Alice, 'to pretend to be two people!
+Why, there's hardly enough of me left to make ONE respectable person!'
+
+Soon her eye fell on a little glass box that was lying under the table:
+she opened it, and found in it a very small cake, on which the words
+'EAT ME' were beautifully marked in currants. 'Well, I'll eat it,' said
+Alice, 'and if it makes me grow larger, I can reach the key; and if it
+makes me grow smaller, I can creep under the door; so either way I'll
+get into the garden, and I don't care which happens!'
+
+She ate a little bit, and said anxiously to herself, 'Which way? Which
+way?', holding her hand on the top of her head to feel which way it was
+growing, and she was quite surprised to find that she remained the same
+size: to be sure, this generally happens when one eats cake, but Alice
+had got so much into the way of expecting nothing but out-of-the-way
+things to happen, that it seemed quite dull and stupid for life to go on
+in the common way.
+
+So she set to work, and very soon finished off the cake.
+
+  *    *    *    *    *    *    *
+
+    *    *    *    *    *    *
+
+  *    *    *    *    *    *    *
+
+
+
+
+CHAPTER II. The Pool of Tears
+
+'Curiouser and curiouser!' cried Alice (she was so much surprised, that
+for the moment she quite forgot how to speak good English); 'now I'm
+opening out like the largest telescope that ever was! Good-bye, feet!'
+(for when she looked down at her feet, they seemed to be almost out of
+sight, they were getting so far off). 'Oh, my poor little feet, I wonder
+who will put on your shoes and stockings for you now, dears? I'm sure
+_I_ shan't be able! I shall be a great deal too far off to trouble
+myself about you: you must manage the best way you can;--but I must be
+kind to them,' thought Alice, 'or perhaps they won't walk the way I want
+to go! Let me see: I'll give them a new pair of boots every Christmas.'
+
+And she went on planning to herself how she would manage it. 'They must
+go by the carrier,' she thought; 'and how funny it'll seem, sending
+presents to one's own feet! And how odd the directions will look!
+
+     ALICE'S RIGHT FOOT, ESQ.
+       HEARTHRUG,
+         NEAR THE FENDER,
+           (WITH ALICE'S LOVE).
+
+Oh dear, what nonsense I'm talking!'
+
+Just then her head struck against the roof of the hall: in fact she was
+now more than nine feet high, and she at once took up the little golden
+key and hurried off to the garden door.
+
+Poor Alice! It was as much as she could do, lying down on one side, to
+look through into the garden with one eye; but to get through was more
+hopeless than ever: she sat down and began to cry again.
+
+'You ought to be ashamed of yourself,' said Alice, 'a great girl like
+you,' (she might well say this), 'to go on crying in this way! Stop this
+moment, I tell you!' But she went on all the same, shedding gallons of
+tears, until there was a large pool all round her, about four inches
+deep and reaching half down the hall.
+
+After a time she heard a little pattering of feet in the distance, and
+she hastily dried her eyes to see what was coming. It was the White
+Rabbit returning, splendidly dressed, with a pair of white kid gloves in
+one hand and a large fan in the other: he came trotting along in a great
+hurry, muttering to himself as he came, 'Oh! the Duchess, the Duchess!
+Oh! won't she be savage if I've kept her waiting!' Alice felt so
+desperate that she was ready to ask help of any one; so, when the Rabbit
+came near her, she began, in a low, timid voice, 'If you please, sir--'
+The Rabbit started violently, dropped the white kid gloves and the fan,
+and skurried away into the darkness as hard as he could go.
+
+Alice took up the fan and gloves, and, as the hall was very hot, she
+kept fanning herself all the time she went on talking: 'Dear, dear! How
+queer everything is to-day! And yesterday things went on just as usual.
+I wonder if I've been changed in the night? Let me think: was I the
+same when I got up this morning? I almost think I can remember feeling a
+little different. But if I'm not the same, the next question is, Who
+in the world am I? Ah, THAT'S the great puzzle!' And she began thinking
+over all the children she knew that were of the same age as herself, to
+see if she could have been changed for any of them.
+
+'I'm sure I'm not Ada,' she said, 'for her hair goes in such long
+ringlets, and mine doesn't go in ringlets at all; and I'm sure I can't
+be Mabel, for I know all sorts of things, and she, oh! she knows such a
+very little! Besides, SHE'S she, and I'm I, and--oh dear, how puzzling
+it all is! I'll try if I know all the things I used to know. Let me
+see: four times five is twelve, and four times six is thirteen, and
+four times seven is--oh dear! I shall never get to twenty at that rate!
+However, the Multiplication Table doesn't signify: let's try Geography.
+London is the capital of Paris, and Paris is the capital of Rome, and
+Rome--no, THAT'S all wrong, I'm certain! I must have been changed for
+Mabel! I'll try and say "How doth the little--"' and she crossed her
+hands on her lap as if she were saying lessons, and began to repeat it,
+but her voice sounded hoarse and strange, and the words did not come the
+same as they used to do:--
+
+     'How doth the little crocodile
+      Improve his shining tail,
+     And pour the waters of the Nile
+      On every golden scale!
+
+     'How cheerfully he seems to grin,
+      How neatly spread his claws,
+     And welcome little fishes in
+      With gently smiling jaws!'
+
+'I'm sure those are not the right words,' said poor Alice, and her eyes
+filled with tears again as she went on, 'I must be Mabel after all, and
+I shall have to go and live in that poky little house, and have next to
+no toys to play with, and oh! ever so many lessons to learn! No, I've
+made up my mind about it; if I'm Mabel, I'll stay down here! It'll be no
+use their putting their heads down and saying "Come up again, dear!" I
+shall only look up and say "Who am I then? Tell me that first, and then,
+if I like being that person, I'll come up: if not, I'll stay down here
+till I'm somebody else"--but, oh dear!' cried Alice, with a sudden burst
+of tears, 'I do wish they WOULD put their heads down! I am so VERY tired
+of being all alone here!'
+
+As she said this she looked down at her hands, and was surprised to see
+that she had put on one of the Rabbit's little white kid gloves while
+she was talking. 'How CAN I have done that?' she thought. 'I must
+be growing small again.' She got up and went to the table to measure
+herself by it, and found that, as nearly as she could guess, she was now
+about two feet high, and was going on shrinking rapidly: she soon found
+out that the cause of this was the fan she was holding, and she dropped
+it hastily, just in time to avoid shrinking away altogether.
+
+'That WAS a narrow escape!' said Alice, a good deal frightened at the
+sudden change, but very glad to find herself still in existence; 'and
+now for the garden!' and she ran with all speed back to the little door:
+but, alas! the little door was shut again, and the little golden key was
+lying on the glass table as before, 'and things are worse than ever,'
+thought the poor child, 'for I never was so small as this before, never!
+And I declare it's too bad, that it is!'
+
+As she said these words her foot slipped, and in another moment, splash!
+she was up to her chin in salt water. Her first idea was that she
+had somehow fallen into the sea, 'and in that case I can go back by
+railway,' she said to herself. (Alice had been to the seaside once in
+her life, and had come to the general conclusion, that wherever you go
+to on the English coast you find a number of bathing machines in the
+sea, some children digging in the sand with wooden spades, then a row
+of lodging houses, and behind them a railway station.) However, she soon
+made out that she was in the pool of tears which she had wept when she
+was nine feet high.
+
+'I wish I hadn't cried so much!' said Alice, as she swam about, trying
+to find her way out. 'I shall be punished for it now, I suppose, by
+being drowned in my own tears! That WILL be a queer thing, to be sure!
+However, everything is queer to-day.'
+
+Just then she heard something splashing about in the pool a little way
+off, and she swam nearer to make out what it was: at first she thought
+it must be a walrus or hippopotamus, but then she remembered how small
+she was now, and she soon made out that it was only a mouse that had
+slipped in like herself.
+
+'Would it be of any use, now,' thought Alice, 'to speak to this mouse?
+Everything is so out-of-the-way down here, that I should think very
+likely it can talk: at any rate, there's no harm in trying.' So she
+began: 'O Mouse, do you know the way out of this pool? I am very tired
+of swimming about here, O Mouse!' (Alice thought this must be the right
+way of speaking to a mouse: she had never done such a thing before, but
+she remembered having seen in her brother's Latin Grammar, 'A mouse--of
+a mouse--to a mouse--a mouse--O mouse!') The Mouse looked at her rather
+inquisitively, and seemed to her to wink with one of its little eyes,
+but it said nothing.
+
+'Perhaps it doesn't understand English,' thought Alice; 'I daresay it's
+a French mouse, come over with William the Conqueror.' (For, with all
+her knowledge of history, Alice had no very clear notion how long ago
+anything had happened.) So she began again: 'Ou est ma chatte?' which
+was the first sentence in her French lesson-book. The Mouse gave a
+sudden leap out of the water, and seemed to quiver all over with fright.
+'Oh, I beg your pardon!' cried Alice hastily, afraid that she had hurt
+the poor animal's feelings. 'I quite forgot you didn't like cats.'
+
+'Not like cats!' cried the Mouse, in a shrill, passionate voice. 'Would
+YOU like cats if you were me?'
+
+'Well, perhaps not,' said Alice in a soothing tone: 'don't be angry
+about it. And yet I wish I could show you our cat Dinah: I think you'd
+take a fancy to cats if you could only see her. She is such a dear quiet
+thing,' Alice went on, half to herself, as she swam lazily about in the
+pool, 'and she sits purring so nicely by the fire, licking her paws and
+washing her face--and she is such a nice soft thing to nurse--and she's
+such a capital one for catching mice--oh, I beg your pardon!' cried
+Alice again, for this time the Mouse was bristling all over, and she
+felt certain it must be really offended. 'We won't talk about her any
+more if you'd rather not.'
+
+'We indeed!' cried the Mouse, who was trembling down to the end of his
+tail. 'As if I would talk on such a subject! Our family always HATED
+cats: nasty, low, vulgar things! Don't let me hear the name again!'
+
+'I won't indeed!' said Alice, in a great hurry to change the subject of
+conversation. 'Are you--are you fond--of--of dogs?' The Mouse did not
+answer, so Alice went on eagerly: 'There is such a nice little dog near
+our house I should like to show you! A little bright-eyed terrier, you
+know, with oh, such long curly brown hair! And it'll fetch things when
+you throw them, and it'll sit up and beg for its dinner, and all sorts
+of things--I can't remember half of them--and it belongs to a farmer,
+you know, and he says it's so useful, it's worth a hundred pounds! He
+says it kills all the rats and--oh dear!' cried Alice in a sorrowful
+tone, 'I'm afraid I've offended it again!' For the Mouse was swimming
+away from her as hard as it could go, and making quite a commotion in
+the pool as it went.
+
+So she called softly after it, 'Mouse dear! Do come back again, and we
+won't talk about cats or dogs either, if you don't like them!' When the
+Mouse heard this, it turned round and swam slowly back to her: its
+face was quite pale (with passion, Alice thought), and it said in a low
+trembling voice, 'Let us get to the shore, and then I'll tell you my
+history, and you'll understand why it is I hate cats and dogs.'
+
+It was high time to go, for the pool was getting quite crowded with the
+birds and animals that had fallen into it: there were a Duck and a Dodo,
+a Lory and an Eaglet, and several other curious creatures. Alice led the
+way, and the whole party swam to the shore.
+
+
+
+
+CHAPTER III. A Caucus-Race and a Long Tale
+
+They were indeed a queer-looking party that assembled on the bank--the
+birds with draggled feathers, the animals with their fur clinging close
+to them, and all dripping wet, cross, and uncomfortable.
+
+The first question of course was, how to get dry again: they had a
+consultation about this, and after a few minutes it seemed quite natural
+to Alice to find herself talking familiarly with them, as if she had
+known them all her life. Indeed, she had quite a long argument with the
+Lory, who at last turned sulky, and would only say, 'I am older than
+you, and must know better'; and this Alice would not allow without
+knowing how old it was, and, as the Lory positively refused to tell its
+age, there was no more to be said.
+
+At last the Mouse, who seemed to be a person of authority among them,
+called out, 'Sit down, all of you, and listen to me! I'LL soon make you
+dry enough!' They all sat down at once, in a large ring, with the Mouse
+in the middle. Alice kept her eyes anxiously fixed on it, for she felt
+sure she would catch a bad cold if she did not get dry very soon.
+
+'Ahem!' said the Mouse with an important air, 'are you all ready? This
+is the driest thing I know. Silence all round, if you please! "William
+the Conqueror, whose cause was favoured by the pope, was soon submitted
+to by the English, who wanted leaders, and had been of late much
+accustomed to usurpation and conquest. Edwin and Morcar, the earls of
+Mercia and Northumbria--"'
+
+'Ugh!' said the Lory, with a shiver.
+
+'I beg your pardon!' said the Mouse, frowning, but very politely: 'Did
+you speak?'
+
+'Not I!' said the Lory hastily.
+
+'I thought you did,' said the Mouse. '--I proceed. "Edwin and Morcar,
+the earls of Mercia and Northumbria, declared for him: and even Stigand,
+the patriotic archbishop of Canterbury, found it advisable--"'
+
+'Found WHAT?' said the Duck.
+
+'Found IT,' the Mouse replied rather crossly: 'of course you know what
+"it" means.'
+
+'I know what "it" means well enough, when I find a thing,' said the
+Duck: 'it's generally a frog or a worm. The question is, what did the
+archbishop find?'
+
+The Mouse did not notice this question, but hurriedly went on, '"--found
+it advisable to go with Edgar Atheling to meet William and offer him the
+crown. William's conduct at first was moderate. But the insolence of his
+Normans--" How are you getting on now, my dear?' it continued, turning
+to Alice as it spoke.
+
+'As wet as ever,' said Alice in a melancholy tone: 'it doesn't seem to
+dry me at all.'
+
+'In that case,' said the Dodo solemnly, rising to its feet, 'I move
+that the meeting adjourn, for the immediate adoption of more energetic
+remedies--'
+
+'Speak English!' said the Eaglet. 'I don't know the meaning of half
+those long words, and, what's more, I don't believe you do either!' And
+the Eaglet bent down its head to hide a smile: some of the other birds
+tittered audibly.
+
+'What I was going to say,' said the Dodo in an offended tone, 'was, that
+the best thing to get us dry would be a Caucus-race.'
+
+'What IS a Caucus-race?' said Alice; not that she wanted much to know,
+but the Dodo had paused as if it thought that SOMEBODY ought to speak,
+and no one else seemed inclined to say anything.
+
+'Why,' said the Dodo, 'the best way to explain it is to do it.' (And, as
+you might like to try the thing yourself, some winter day, I will tell
+you how the Dodo managed it.)
+
+First it marked out a race-course, in a sort of circle, ('the exact
+shape doesn't matter,' it said,) and then all the party were placed
+along the course, here and there. There was no 'One, two, three, and
+away,' but they began running when they liked, and left off when they
+liked, so that it was not easy to know when the race was over. However,
+when they had been running half an hour or so, and were quite dry again,
+the Dodo suddenly called out 'The race is over!' and they all crowded
+round it, panting, and asking, 'But who has won?'
+
+This question the Dodo could not answer without a great deal of thought,
+and it sat for a long time with one finger pressed upon its forehead
+(the position in which you usually see Shakespeare, in the pictures
+of him), while the rest waited in silence. At last the Dodo said,
+'EVERYBODY has won, and all must have prizes.'
+
+'But who is to give the prizes?' quite a chorus of voices asked.
+
+'Why, SHE, of course,' said the Dodo, pointing to Alice with one finger;
+and the whole party at once crowded round her, calling out in a confused
+way, 'Prizes! Prizes!'
+
+Alice had no idea what to do, and in despair she put her hand in her
+pocket, and pulled out a box of comfits, (luckily the salt water had
+not got into it), and handed them round as prizes. There was exactly one
+a-piece all round.
+
+'But she must have a prize herself, you know,' said the Mouse.
+
+'Of course,' the Dodo replied very gravely. 'What else have you got in
+your pocket?' he went on, turning to Alice.
+
+'Only a thimble,' said Alice sadly.
+
+'Hand it over here,' said the Dodo.
+
+Then they all crowded round her once more, while the Dodo solemnly
+presented the thimble, saying 'We beg your acceptance of this elegant
+thimble'; and, when it had finished this short speech, they all cheered.
+
+Alice thought the whole thing very absurd, but they all looked so grave
+that she did not dare to laugh; and, as she could not think of anything
+to say, she simply bowed, and took the thimble, looking as solemn as she
+could.
+
+The next thing was to eat the comfits: this caused some noise and
+confusion, as the large birds complained that they could not taste
+theirs, and the small ones choked and had to be patted on the back.
+However, it was over at last, and they sat down again in a ring, and
+begged the Mouse to tell them something more.
+
+'You promised to tell me your history, you know,' said Alice, 'and why
+it is you hate--C and D,' she added in a whisper, half afraid that it
+would be offended again.
+
+'Mine is a long and a sad tale!' said the Mouse, turning to Alice, and
+sighing.
+
+'It IS a long tail, certainly,' said Alice, looking down with wonder at
+the Mouse's tail; 'but why do you call it sad?' And she kept on puzzling
+about it while the Mouse was speaking, so that her idea of the tale was
+something like this:--
+
+         'Fury said to a
+         mouse, That he
+        met in the
+       house,
+     "Let us
+      both go to
+       law: I will
+        prosecute
+         YOU.--Come,
+           I'll take no
+           denial; We
+          must have a
+        trial: For
+      really this
+     morning I've
+    nothing
+    to do."
+     Said the
+      mouse to the
+       cur, "Such
+        a trial,
+         dear Sir,
+            With
+          no jury
+        or judge,
+       would be
+      wasting
+      our
+      breath."
+       "I'll be
+        judge, I'll
+         be jury,"
+            Said
+         cunning
+          old Fury:
+          "I'll
+          try the
+            whole
+            cause,
+              and
+           condemn
+           you
+          to
+           death."'
+
+
+'You are not attending!' said the Mouse to Alice severely. 'What are you
+thinking of?'
+
+'I beg your pardon,' said Alice very humbly: 'you had got to the fifth
+bend, I think?'
+
+'I had NOT!' cried the Mouse, sharply and very angrily.
+
+'A knot!' said Alice, always ready to make herself useful, and looking
+anxiously about her. 'Oh, do let me help to undo it!'
+
+'I shall do nothing of the sort,' said the Mouse, getting up and walking
+away. 'You insult me by talking such nonsense!'
+
+'I didn't mean it!' pleaded poor Alice. 'But you're so easily offended,
+you know!'
+
+The Mouse only growled in reply.
+
+'Please come back and finish your story!' Alice called after it; and the
+others all joined in chorus, 'Yes, please do!' but the Mouse only shook
+its head impatiently, and walked a little quicker.
+
+'What a pity it wouldn't stay!' sighed the Lory, as soon as it was quite
+out of sight; and an old Crab took the opportunity of saying to her
+daughter 'Ah, my dear! Let this be a lesson to you never to lose
+YOUR temper!' 'Hold your tongue, Ma!' said the young Crab, a little
+snappishly. 'You're enough to try the patience of an oyster!'
+
+'I wish I had our Dinah here, I know I do!' said Alice aloud, addressing
+nobody in particular. 'She'd soon fetch it back!'
+
+'And who is Dinah, if I might venture to ask the question?' said the
+Lory.
+
+Alice replied eagerly, for she was always ready to talk about her pet:
+'Dinah's our cat. And she's such a capital one for catching mice you
+can't think! And oh, I wish you could see her after the birds! Why,
+she'll eat a little bird as soon as look at it!'
+
+This speech caused a remarkable sensation among the party. Some of the
+birds hurried off at once: one old Magpie began wrapping itself up very
+carefully, remarking, 'I really must be getting home; the night-air
+doesn't suit my throat!' and a Canary called out in a trembling voice to
+its children, 'Come away, my dears! It's high time you were all in bed!'
+On various pretexts they all moved off, and Alice was soon left alone.
+
+'I wish I hadn't mentioned Dinah!' she said to herself in a melancholy
+tone. 'Nobody seems to like her, down here, and I'm sure she's the best
+cat in the world! Oh, my dear Dinah! I wonder if I shall ever see you
+any more!' And here poor Alice began to cry again, for she felt very
+lonely and low-spirited. In a little while, however, she again heard
+a little pattering of footsteps in the distance, and she looked up
+eagerly, half hoping that the Mouse had changed his mind, and was coming
+back to finish his story.
+
+
+
+
+CHAPTER IV. The Rabbit Sends in a Little Bill
+
+It was the White Rabbit, trotting slowly back again, and looking
+anxiously about as it went, as if it had lost something; and she heard
+it muttering to itself 'The Duchess! The Duchess! Oh my dear paws! Oh
+my fur and whiskers! She'll get me executed, as sure as ferrets are
+ferrets! Where CAN I have dropped them, I wonder?' Alice guessed in a
+moment that it was looking for the fan and the pair of white kid gloves,
+and she very good-naturedly began hunting about for them, but they were
+nowhere to be seen--everything seemed to have changed since her swim in
+the pool, and the great hall, with the glass table and the little door,
+had vanished completely.
+
+Very soon the Rabbit noticed Alice, as she went hunting about, and
+called out to her in an angry tone, 'Why, Mary Ann, what ARE you doing
+out here? Run home this moment, and fetch me a pair of gloves and a fan!
+Quick, now!' And Alice was so much frightened that she ran off at once
+in the direction it pointed to, without trying to explain the mistake it
+had made.
+
+'He took me for his housemaid,' she said to herself as she ran. 'How
+surprised he'll be when he finds out who I am! But I'd better take him
+his fan and gloves--that is, if I can find them.' As she said this, she
+came upon a neat little house, on the door of which was a bright brass
+plate with the name 'W. RABBIT' engraved upon it. She went in without
+knocking, and hurried upstairs, in great fear lest she should meet the
+real Mary Ann, and be turned out of the house before she had found the
+fan and gloves.
+
+'How queer it seems,' Alice said to herself, 'to be going messages for
+a rabbit! I suppose Dinah'll be sending me on messages next!' And she
+began fancying the sort of thing that would happen: '"Miss Alice! Come
+here directly, and get ready for your walk!" "Coming in a minute,
+nurse! But I've got to see that the mouse doesn't get out." Only I don't
+think,' Alice went on, 'that they'd let Dinah stop in the house if it
+began ordering people about like that!'
+
+By this time she had found her way into a tidy little room with a table
+in the window, and on it (as she had hoped) a fan and two or three pairs
+of tiny white kid gloves: she took up the fan and a pair of the gloves,
+and was just going to leave the room, when her eye fell upon a little
+bottle that stood near the looking-glass. There was no label this time
+with the words 'DRINK ME,' but nevertheless she uncorked it and put it
+to her lips. 'I know SOMETHING interesting is sure to happen,' she said
+to herself, 'whenever I eat or drink anything; so I'll just see what
+this bottle does. I do hope it'll make me grow large again, for really
+I'm quite tired of being such a tiny little thing!'
+
+It did so indeed, and much sooner than she had expected: before she had
+drunk half the bottle, she found her head pressing against the ceiling,
+and had to stoop to save her neck from being broken. She hastily put
+down the bottle, saying to herself 'That's quite enough--I hope I shan't
+grow any more--As it is, I can't get out at the door--I do wish I hadn't
+drunk quite so much!'
+
+Alas! it was too late to wish that! She went on growing, and growing,
+and very soon had to kneel down on the floor: in another minute there
+was not even room for this, and she tried the effect of lying down with
+one elbow against the door, and the other arm curled round her head.
+Still she went on growing, and, as a last resource, she put one arm out
+of the window, and one foot up the chimney, and said to herself 'Now I
+can do no more, whatever happens. What WILL become of me?'
+
+Luckily for Alice, the little magic bottle had now had its full effect,
+and she grew no larger: still it was very uncomfortable, and, as there
+seemed to be no sort of chance of her ever getting out of the room
+again, no wonder she felt unhappy.
+
+'It was much pleasanter at home,' thought poor Alice, 'when one wasn't
+always growing larger and smaller, and being ordered about by mice and
+rabbits. I almost wish I hadn't gone down that rabbit-hole--and yet--and
+yet--it's rather curious, you know, this sort of life! I do wonder what
+CAN have happened to me! When I used to read fairy-tales, I fancied that
+kind of thing never happened, and now here I am in the middle of one!
+There ought to be a book written about me, that there ought! And when I
+grow up, I'll write one--but I'm grown up now,' she added in a sorrowful
+tone; 'at least there's no room to grow up any more HERE.'
+
+'But then,' thought Alice, 'shall I NEVER get any older than I am
+now? That'll be a comfort, one way--never to be an old woman--but
+then--always to have lessons to learn! Oh, I shouldn't like THAT!'
+
+'Oh, you foolish Alice!' she answered herself. 'How can you learn
+lessons in here? Why, there's hardly room for YOU, and no room at all
+for any lesson-books!'
+
+And so she went on, taking first one side and then the other, and making
+quite a conversation of it altogether; but after a few minutes she heard
+a voice outside, and stopped to listen.
+
+'Mary Ann! Mary Ann!' said the voice. 'Fetch me my gloves this moment!'
+Then came a little pattering of feet on the stairs. Alice knew it was
+the Rabbit coming to look for her, and she trembled till she shook the
+house, quite forgetting that she was now about a thousand times as large
+as the Rabbit, and had no reason to be afraid of it.
+
+Presently the Rabbit came up to the door, and tried to open it; but, as
+the door opened inwards, and Alice's elbow was pressed hard against it,
+that attempt proved a failure. Alice heard it say to itself 'Then I'll
+go round and get in at the window.'
+
+'THAT you won't!' thought Alice, and, after waiting till she fancied
+she heard the Rabbit just under the window, she suddenly spread out her
+hand, and made a snatch in the air. She did not get hold of anything,
+but she heard a little shriek and a fall, and a crash of broken glass,
+from which she concluded that it was just possible it had fallen into a
+cucumber-frame, or something of the sort.
+
+Next came an angry voice--the Rabbit's--'Pat! Pat! Where are you?' And
+then a voice she had never heard before, 'Sure then I'm here! Digging
+for apples, yer honour!'
+
+'Digging for apples, indeed!' said the Rabbit angrily. 'Here! Come and
+help me out of THIS!' (Sounds of more broken glass.)
+
+'Now tell me, Pat, what's that in the window?'
+
+'Sure, it's an arm, yer honour!' (He pronounced it 'arrum.')
+
+'An arm, you goose! Who ever saw one that size? Why, it fills the whole
+window!'
+
+'Sure, it does, yer honour: but it's an arm for all that.'
+
+'Well, it's got no business there, at any rate: go and take it away!'
+
+There was a long silence after this, and Alice could only hear whispers
+now and then; such as, 'Sure, I don't like it, yer honour, at all, at
+all!' 'Do as I tell you, you coward!' and at last she spread out her
+hand again, and made another snatch in the air. This time there were
+TWO little shrieks, and more sounds of broken glass. 'What a number of
+cucumber-frames there must be!' thought Alice. 'I wonder what they'll do
+next! As for pulling me out of the window, I only wish they COULD! I'm
+sure I don't want to stay in here any longer!'
+
+She waited for some time without hearing anything more: at last came a
+rumbling of little cartwheels, and the sound of a good many voices
+all talking together: she made out the words: 'Where's the other
+ladder?--Why, I hadn't to bring but one; Bill's got the other--Bill!
+fetch it here, lad!--Here, put 'em up at this corner--No, tie 'em
+together first--they don't reach half high enough yet--Oh! they'll
+do well enough; don't be particular--Here, Bill! catch hold of this
+rope--Will the roof bear?--Mind that loose slate--Oh, it's coming
+down! Heads below!' (a loud crash)--'Now, who did that?--It was Bill, I
+fancy--Who's to go down the chimney?--Nay, I shan't! YOU do it!--That I
+won't, then!--Bill's to go down--Here, Bill! the master says you're to
+go down the chimney!'
+
+'Oh! So Bill's got to come down the chimney, has he?' said Alice to
+herself. 'Why, they seem to put everything upon Bill! I wouldn't be in
+Bill's place for a good deal: this fireplace is narrow, to be sure; but
+I THINK I can kick a little!'
+
+She drew her foot as far down the chimney as she could, and waited
+till she heard a little animal (she couldn't guess of what sort it was)
+scratching and scrambling about in the chimney close above her: then,
+saying to herself 'This is Bill,' she gave one sharp kick, and waited to
+see what would happen next.
+
+The first thing she heard was a general chorus of 'There goes Bill!'
+then the Rabbit's voice along--'Catch him, you by the hedge!' then
+silence, and then another confusion of voices--'Hold up his head--Brandy
+now--Don't choke him--How was it, old fellow? What happened to you? Tell
+us all about it!'
+
+Last came a little feeble, squeaking voice, ('That's Bill,' thought
+Alice,) 'Well, I hardly know--No more, thank ye; I'm better now--but I'm
+a deal too flustered to tell you--all I know is, something comes at me
+like a Jack-in-the-box, and up I goes like a sky-rocket!'
+
+'So you did, old fellow!' said the others.
+
+'We must burn the house down!' said the Rabbit's voice; and Alice called
+out as loud as she could, 'If you do, I'll set Dinah at you!'
+
+There was a dead silence instantly, and Alice thought to herself, 'I
+wonder what they WILL do next! If they had any sense, they'd take the
+roof off.' After a minute or two, they began moving about again, and
+Alice heard the Rabbit say, 'A barrowful will do, to begin with.'
+
+'A barrowful of WHAT?' thought Alice; but she had not long to doubt,
+for the next moment a shower of little pebbles came rattling in at the
+window, and some of them hit her in the face. 'I'll put a stop to this,'
+she said to herself, and shouted out, 'You'd better not do that again!'
+which produced another dead silence.
+
+Alice noticed with some surprise that the pebbles were all turning into
+little cakes as they lay on the floor, and a bright idea came into her
+head. 'If I eat one of these cakes,' she thought, 'it's sure to make
+SOME change in my size; and as it can't possibly make me larger, it must
+make me smaller, I suppose.'
+
+So she swallowed one of the cakes, and was delighted to find that she
+began shrinking directly. As soon as she was small enough to get through
+the door, she ran out of the house, and found quite a crowd of little
+animals and birds waiting outside. The poor little Lizard, Bill, was
+in the middle, being held up by two guinea-pigs, who were giving it
+something out of a bottle. They all made a rush at Alice the moment she
+appeared; but she ran off as hard as she could, and soon found herself
+safe in a thick wood.
+
+'The first thing I've got to do,' said Alice to herself, as she wandered
+about in the wood, 'is to grow to my right size again; and the second
+thing is to find my way into that lovely garden. I think that will be
+the best plan.'
+
+It sounded an excellent plan, no doubt, and very neatly and simply
+arranged; the only difficulty was, that she had not the smallest idea
+how to set about it; and while she was peering about anxiously among
+the trees, a little sharp bark just over her head made her look up in a
+great hurry.
+
+An enormous puppy was looking down at her with large round eyes, and
+feebly stretching out one paw, trying to touch her. 'Poor little thing!'
+said Alice, in a coaxing tone, and she tried hard to whistle to it; but
+she was terribly frightened all the time at the thought that it might be
+hungry, in which case it would be very likely to eat her up in spite of
+all her coaxing.
+
+Hardly knowing what she did, she picked up a little bit of stick, and
+held it out to the puppy; whereupon the puppy jumped into the air off
+all its feet at once, with a yelp of delight, and rushed at the stick,
+and made believe to worry it; then Alice dodged behind a great thistle,
+to keep herself from being run over; and the moment she appeared on the
+other side, the puppy made another rush at the stick, and tumbled head
+over heels in its hurry to get hold of it; then Alice, thinking it was
+very like having a game of play with a cart-horse, and expecting every
+moment to be trampled under its feet, ran round the thistle again; then
+the puppy began a series of short charges at the stick, running a very
+little way forwards each time and a long way back, and barking hoarsely
+all the while, till at last it sat down a good way off, panting, with
+its tongue hanging out of its mouth, and its great eyes half shut.
+
+This seemed to Alice a good opportunity for making her escape; so she
+set off at once, and ran till she was quite tired and out of breath, and
+till the puppy's bark sounded quite faint in the distance.
+
+'And yet what a dear little puppy it was!' said Alice, as she leant
+against a buttercup to rest herself, and fanned herself with one of the
+leaves: 'I should have liked teaching it tricks very much, if--if I'd
+only been the right size to do it! Oh dear! I'd nearly forgotten that
+I've got to grow up again! Let me see--how IS it to be managed? I
+suppose I ought to eat or drink something or other; but the great
+question is, what?'
+
+The great question certainly was, what? Alice looked all round her at
+the flowers and the blades of grass, but she did not see anything that
+looked like the right thing to eat or drink under the circumstances.
+There was a large mushroom growing near her, about the same height as
+herself; and when she had looked under it, and on both sides of it, and
+behind it, it occurred to her that she might as well look and see what
+was on the top of it.
+
+She stretched herself up on tiptoe, and peeped over the edge of the
+mushroom, and her eyes immediately met those of a large caterpillar,
+that was sitting on the top with its arms folded, quietly smoking a long
+hookah, and taking not the smallest notice of her or of anything else.
+
+
+
+
+CHAPTER V. Advice from a Caterpillar
+
+The Caterpillar and Alice looked at each other for some time in silence:
+at last the Caterpillar took the hookah out of its mouth, and addressed
+her in a languid, sleepy voice.
+
+'Who are YOU?' said the Caterpillar.
+
+This was not an encouraging opening for a conversation. Alice replied,
+rather shyly, 'I--I hardly know, sir, just at present--at least I know
+who I WAS when I got up this morning, but I think I must have been
+changed several times since then.'
+
+'What do you mean by that?' said the Caterpillar sternly. 'Explain
+yourself!'
+
+'I can't explain MYSELF, I'm afraid, sir,' said Alice, 'because I'm not
+myself, you see.'
+
+'I don't see,' said the Caterpillar.
+
+'I'm afraid I can't put it more clearly,' Alice replied very politely,
+'for I can't understand it myself to begin with; and being so many
+different sizes in a day is very confusing.'
+
+'It isn't,' said the Caterpillar.
+
+'Well, perhaps you haven't found it so yet,' said Alice; 'but when you
+have to turn into a chrysalis--you will some day, you know--and then
+after that into a butterfly, I should think you'll feel it a little
+queer, won't you?'
+
+'Not a bit,' said the Caterpillar.
+
+'Well, perhaps your feelings may be different,' said Alice; 'all I know
+is, it would feel very queer to ME.'
+
+'You!' said the Caterpillar contemptuously. 'Who are YOU?'
+
+Which brought them back again to the beginning of the conversation.
+Alice felt a little irritated at the Caterpillar's making such VERY
+short remarks, and she drew herself up and said, very gravely, 'I think,
+you ought to tell me who YOU are, first.'
+
+'Why?' said the Caterpillar.
+
+Here was another puzzling question; and as Alice could not think of any
+good reason, and as the Caterpillar seemed to be in a VERY unpleasant
+state of mind, she turned away.
+
+'Come back!' the Caterpillar called after her. 'I've something important
+to say!'
+
+This sounded promising, certainly: Alice turned and came back again.
+
+'Keep your temper,' said the Caterpillar.
+
+'Is that all?' said Alice, swallowing down her anger as well as she
+could.
+
+'No,' said the Caterpillar.
+
+Alice thought she might as well wait, as she had nothing else to do, and
+perhaps after all it might tell her something worth hearing. For some
+minutes it puffed away without speaking, but at last it unfolded its
+arms, took the hookah out of its mouth again, and said, 'So you think
+you're changed, do you?'
+
+'I'm afraid I am, sir,' said Alice; 'I can't remember things as I
+used--and I don't keep the same size for ten minutes together!'
+
+'Can't remember WHAT things?' said the Caterpillar.
+
+'Well, I've tried to say "HOW DOTH THE LITTLE BUSY BEE," but it all came
+different!' Alice replied in a very melancholy voice.
+
+'Repeat, "YOU ARE OLD, FATHER WILLIAM,"' said the Caterpillar.
+
+Alice folded her hands, and began:--
+
+   'You are old, Father William,' the young man said,
+    'And your hair has become very white;
+   And yet you incessantly stand on your head--
+    Do you think, at your age, it is right?'
+
+   'In my youth,' Father William replied to his son,
+    'I feared it might injure the brain;
+   But, now that I'm perfectly sure I have none,
+    Why, I do it again and again.'
+
+   'You are old,' said the youth, 'as I mentioned before,
+    And have grown most uncommonly fat;
+   Yet you turned a back-somersault in at the door--
+    Pray, what is the reason of that?'
+
+   'In my youth,' said the sage, as he shook his grey locks,
+    'I kept all my limbs very supple
+   By the use of this ointment--one shilling the box--
+    Allow me to sell you a couple?'
+
+   'You are old,' said the youth, 'and your jaws are too weak
+    For anything tougher than suet;
+   Yet you finished the goose, with the bones and the beak--
+    Pray how did you manage to do it?'
+
+   'In my youth,' said his father, 'I took to the law,
+    And argued each case with my wife;
+   And the muscular strength, which it gave to my jaw,
+    Has lasted the rest of my life.'
+
+   'You are old,' said the youth, 'one would hardly suppose
+    That your eye was as steady as ever;
+   Yet you balanced an eel on the end of your nose--
+    What made you so awfully clever?'
+
+   'I have answered three questions, and that is enough,'
+    Said his father; 'don't give yourself airs!
+   Do you think I can listen all day to such stuff?
+    Be off, or I'll kick you down stairs!'
+
+
+'That is not said right,' said the Caterpillar.
+
+'Not QUITE right, I'm afraid,' said Alice, timidly; 'some of the words
+have got altered.'
+
+'It is wrong from beginning to end,' said the Caterpillar decidedly, and
+there was silence for some minutes.
+
+The Caterpillar was the first to speak.
+
+'What size do you want to be?' it asked.
+
+'Oh, I'm not particular as to size,' Alice hastily replied; 'only one
+doesn't like changing so often, you know.'
+
+'I DON'T know,' said the Caterpillar.
+
+Alice said nothing: she had never been so much contradicted in her life
+before, and she felt that she was losing her temper.
+
+'Are you content now?' said the Caterpillar.
+
+'Well, I should like to be a LITTLE larger, sir, if you wouldn't mind,'
+said Alice: 'three inches is such a wretched height to be.'
+
+'It is a very good height indeed!' said the Caterpillar angrily, rearing
+itself upright as it spoke (it was exactly three inches high).
+
+'But I'm not used to it!' pleaded poor Alice in a piteous tone. And
+she thought to herself, 'I wish the creatures wouldn't be so easily
+offended!'
+
+'You'll get used to it in time,' said the Caterpillar; and it put the
+hookah into its mouth and began smoking again.
+
+This time Alice waited patiently until it chose to speak again. In
+a minute or two the Caterpillar took the hookah out of its mouth
+and yawned once or twice, and shook itself. Then it got down off the
+mushroom, and crawled away in the grass, merely remarking as it went,
+'One side will make you grow taller, and the other side will make you
+grow shorter.'
+
+'One side of WHAT? The other side of WHAT?' thought Alice to herself.
+
+'Of the mushroom,' said the Caterpillar, just as if she had asked it
+aloud; and in another moment it was out of sight.
+
+Alice remained looking thoughtfully at the mushroom for a minute, trying
+to make out which were the two sides of it; and as it was perfectly
+round, she found this a very difficult question. However, at last she
+stretched her arms round it as far as they would go, and broke off a bit
+of the edge with each hand.
+
+'And now which is which?' she said to herself, and nibbled a little of
+the right-hand bit to try the effect: the next moment she felt a violent
+blow underneath her chin: it had struck her foot!
+
+She was a good deal frightened by this very sudden change, but she felt
+that there was no time to be lost, as she was shrinking rapidly; so she
+set to work at once to eat some of the other bit. Her chin was pressed
+so closely against her foot, that there was hardly room to open her
+mouth; but she did it at last, and managed to swallow a morsel of the
+lefthand bit.
+
+
+  *    *    *    *    *    *    *
+
+    *    *    *    *    *    *
+
+  *    *    *    *    *    *    *
+
+'Come, my head's free at last!' said Alice in a tone of delight, which
+changed into alarm in another moment, when she found that her shoulders
+were nowhere to be found: all she could see, when she looked down, was
+an immense length of neck, which seemed to rise like a stalk out of a
+sea of green leaves that lay far below her.
+
+'What CAN all that green stuff be?' said Alice. 'And where HAVE my
+shoulders got to? And oh, my poor hands, how is it I can't see you?'
+She was moving them about as she spoke, but no result seemed to follow,
+except a little shaking among the distant green leaves.
+
+As there seemed to be no chance of getting her hands up to her head, she
+tried to get her head down to them, and was delighted to find that her
+neck would bend about easily in any direction, like a serpent. She had
+just succeeded in curving it down into a graceful zigzag, and was going
+to dive in among the leaves, which she found to be nothing but the tops
+of the trees under which she had been wandering, when a sharp hiss made
+her draw back in a hurry: a large pigeon had flown into her face, and
+was beating her violently with its wings.
+
+'Serpent!' screamed the Pigeon.
+
+'I'm NOT a serpent!' said Alice indignantly. 'Let me alone!'
+
+'Serpent, I say again!' repeated the Pigeon, but in a more subdued tone,
+and added with a kind of sob, 'I've tried every way, and nothing seems
+to suit them!'
+
+'I haven't the least idea what you're talking about,' said Alice.
+
+'I've tried the roots of trees, and I've tried banks, and I've tried
+hedges,' the Pigeon went on, without attending to her; 'but those
+serpents! There's no pleasing them!'
+
+Alice was more and more puzzled, but she thought there was no use in
+saying anything more till the Pigeon had finished.
+
+'As if it wasn't trouble enough hatching the eggs,' said the Pigeon;
+'but I must be on the look-out for serpents night and day! Why, I
+haven't had a wink of sleep these three weeks!'
+
+'I'm very sorry you've been annoyed,' said Alice, who was beginning to
+see its meaning.
+
+'And just as I'd taken the highest tree in the wood,' continued the
+Pigeon, raising its voice to a shriek, 'and just as I was thinking I
+should be free of them at last, they must needs come wriggling down from
+the sky! Ugh, Serpent!'
+
+'But I'm NOT a serpent, I tell you!' said Alice. 'I'm a--I'm a--'
+
+'Well! WHAT are you?' said the Pigeon. 'I can see you're trying to
+invent something!'
+
+'I--I'm a little girl,' said Alice, rather doubtfully, as she remembered
+the number of changes she had gone through that day.
+
+'A likely story indeed!' said the Pigeon in a tone of the deepest
+contempt. 'I've seen a good many little girls in my time, but never ONE
+with such a neck as that! No, no! You're a serpent; and there's no use
+denying it. I suppose you'll be telling me next that you never tasted an
+egg!'
+
+'I HAVE tasted eggs, certainly,' said Alice, who was a very truthful
+child; 'but little girls eat eggs quite as much as serpents do, you
+know.'
+
+'I don't believe it,' said the Pigeon; 'but if they do, why then they're
+a kind of serpent, that's all I can say.'
+
+This was such a new idea to Alice, that she was quite silent for a
+minute or two, which gave the Pigeon the opportunity of adding, 'You're
+looking for eggs, I know THAT well enough; and what does it matter to me
+whether you're a little girl or a serpent?'
+
+'It matters a good deal to ME,' said Alice hastily; 'but I'm not looking
+for eggs, as it happens; and if I was, I shouldn't want YOURS: I don't
+like them raw.'
+
+'Well, be off, then!' said the Pigeon in a sulky tone, as it settled
+down again into its nest. Alice crouched down among the trees as well as
+she could, for her neck kept getting entangled among the branches, and
+every now and then she had to stop and untwist it. After a while she
+remembered that she still held the pieces of mushroom in her hands, and
+she set to work very carefully, nibbling first at one and then at the
+other, and growing sometimes taller and sometimes shorter, until she had
+succeeded in bringing herself down to her usual height.
+
+It was so long since she had been anything near the right size, that it
+felt quite strange at first; but she got used to it in a few minutes,
+and began talking to herself, as usual. 'Come, there's half my plan done
+now! How puzzling all these changes are! I'm never sure what I'm going
+to be, from one minute to another! However, I've got back to my right
+size: the next thing is, to get into that beautiful garden--how IS that
+to be done, I wonder?' As she said this, she came suddenly upon an open
+place, with a little house in it about four feet high. 'Whoever lives
+there,' thought Alice, 'it'll never do to come upon them THIS size: why,
+I should frighten them out of their wits!' So she began nibbling at the
+righthand bit again, and did not venture to go near the house till she
+had brought herself down to nine inches high.
+
+
+
+
+CHAPTER VI. Pig and Pepper
+
+For a minute or two she stood looking at the house, and wondering what
+to do next, when suddenly a footman in livery came running out of the
+wood--(she considered him to be a footman because he was in livery:
+otherwise, judging by his face only, she would have called him a
+fish)--and rapped loudly at the door with his knuckles. It was opened
+by another footman in livery, with a round face, and large eyes like a
+frog; and both footmen, Alice noticed, had powdered hair that curled all
+over their heads. She felt very curious to know what it was all about,
+and crept a little way out of the wood to listen.
+
+The Fish-Footman began by producing from under his arm a great letter,
+nearly as large as himself, and this he handed over to the other,
+saying, in a solemn tone, 'For the Duchess. An invitation from the Queen
+to play croquet.' The Frog-Footman repeated, in the same solemn tone,
+only changing the order of the words a little, 'From the Queen. An
+invitation for the Duchess to play croquet.'
+
+Then they both bowed low, and their curls got entangled together.
+
+Alice laughed so much at this, that she had to run back into the
+wood for fear of their hearing her; and when she next peeped out the
+Fish-Footman was gone, and the other was sitting on the ground near the
+door, staring stupidly up into the sky.
+
+Alice went timidly up to the door, and knocked.
+
+'There's no sort of use in knocking,' said the Footman, 'and that for
+two reasons. First, because I'm on the same side of the door as you
+are; secondly, because they're making such a noise inside, no one could
+possibly hear you.' And certainly there was a most extraordinary noise
+going on within--a constant howling and sneezing, and every now and then
+a great crash, as if a dish or kettle had been broken to pieces.
+
+'Please, then,' said Alice, 'how am I to get in?'
+
+'There might be some sense in your knocking,' the Footman went on
+without attending to her, 'if we had the door between us. For instance,
+if you were INSIDE, you might knock, and I could let you out, you know.'
+He was looking up into the sky all the time he was speaking, and this
+Alice thought decidedly uncivil. 'But perhaps he can't help it,' she
+said to herself; 'his eyes are so VERY nearly at the top of his head.
+But at any rate he might answer questions.--How am I to get in?' she
+repeated, aloud.
+
+'I shall sit here,' the Footman remarked, 'till tomorrow--'
+
+At this moment the door of the house opened, and a large plate came
+skimming out, straight at the Footman's head: it just grazed his nose,
+and broke to pieces against one of the trees behind him.
+
+'--or next day, maybe,' the Footman continued in the same tone, exactly
+as if nothing had happened.
+
+'How am I to get in?' asked Alice again, in a louder tone.
+
+'ARE you to get in at all?' said the Footman. 'That's the first
+question, you know.'
+
+It was, no doubt: only Alice did not like to be told so. 'It's really
+dreadful,' she muttered to herself, 'the way all the creatures argue.
+It's enough to drive one crazy!'
+
+The Footman seemed to think this a good opportunity for repeating his
+remark, with variations. 'I shall sit here,' he said, 'on and off, for
+days and days.'
+
+'But what am I to do?' said Alice.
+
+'Anything you like,' said the Footman, and began whistling.
+
+'Oh, there's no use in talking to him,' said Alice desperately: 'he's
+perfectly idiotic!' And she opened the door and went in.
+
+The door led right into a large kitchen, which was full of smoke from
+one end to the other: the Duchess was sitting on a three-legged stool in
+the middle, nursing a baby; the cook was leaning over the fire, stirring
+a large cauldron which seemed to be full of soup.
+
+'There's certainly too much pepper in that soup!' Alice said to herself,
+as well as she could for sneezing.
+
+There was certainly too much of it in the air. Even the Duchess
+sneezed occasionally; and as for the baby, it was sneezing and howling
+alternately without a moment's pause. The only things in the kitchen
+that did not sneeze, were the cook, and a large cat which was sitting on
+the hearth and grinning from ear to ear.
+
+'Please would you tell me,' said Alice, a little timidly, for she was
+not quite sure whether it was good manners for her to speak first, 'why
+your cat grins like that?'
+
+'It's a Cheshire cat,' said the Duchess, 'and that's why. Pig!'
+
+She said the last word with such sudden violence that Alice quite
+jumped; but she saw in another moment that it was addressed to the baby,
+and not to her, so she took courage, and went on again:--
+
+'I didn't know that Cheshire cats always grinned; in fact, I didn't know
+that cats COULD grin.'
+
+'They all can,' said the Duchess; 'and most of 'em do.'
+
+'I don't know of any that do,' Alice said very politely, feeling quite
+pleased to have got into a conversation.
+
+'You don't know much,' said the Duchess; 'and that's a fact.'
+
+Alice did not at all like the tone of this remark, and thought it would
+be as well to introduce some other subject of conversation. While she
+was trying to fix on one, the cook took the cauldron of soup off the
+fire, and at once set to work throwing everything within her reach at
+the Duchess and the baby--the fire-irons came first; then followed a
+shower of saucepans, plates, and dishes. The Duchess took no notice of
+them even when they hit her; and the baby was howling so much already,
+that it was quite impossible to say whether the blows hurt it or not.
+
+'Oh, PLEASE mind what you're doing!' cried Alice, jumping up and down in
+an agony of terror. 'Oh, there goes his PRECIOUS nose'; as an unusually
+large saucepan flew close by it, and very nearly carried it off.
+
+'If everybody minded their own business,' the Duchess said in a hoarse
+growl, 'the world would go round a deal faster than it does.'
+
+'Which would NOT be an advantage,' said Alice, who felt very glad to get
+an opportunity of showing off a little of her knowledge. 'Just think of
+what work it would make with the day and night! You see the earth takes
+twenty-four hours to turn round on its axis--'
+
+'Talking of axes,' said the Duchess, 'chop off her head!'
+
+Alice glanced rather anxiously at the cook, to see if she meant to take
+the hint; but the cook was busily stirring the soup, and seemed not to
+be listening, so she went on again: 'Twenty-four hours, I THINK; or is
+it twelve? I--'
+
+'Oh, don't bother ME,' said the Duchess; 'I never could abide figures!'
+And with that she began nursing her child again, singing a sort of
+lullaby to it as she did so, and giving it a violent shake at the end of
+every line:
+
+   'Speak roughly to your little boy,
+    And beat him when he sneezes:
+   He only does it to annoy,
+    Because he knows it teases.'
+
+         CHORUS.
+
+ (In which the cook and the baby joined):--
+
+       'Wow! wow! wow!'
+
+While the Duchess sang the second verse of the song, she kept tossing
+the baby violently up and down, and the poor little thing howled so,
+that Alice could hardly hear the words:--
+
+   'I speak severely to my boy,
+    I beat him when he sneezes;
+   For he can thoroughly enjoy
+    The pepper when he pleases!'
+
+         CHORUS.
+
+       'Wow! wow! wow!'
+
+'Here! you may nurse it a bit, if you like!' the Duchess said to Alice,
+flinging the baby at her as she spoke. 'I must go and get ready to play
+croquet with the Queen,' and she hurried out of the room. The cook threw
+a frying-pan after her as she went out, but it just missed her.
+
+Alice caught the baby with some difficulty, as it was a queer-shaped
+little creature, and held out its arms and legs in all directions, 'just
+like a star-fish,' thought Alice. The poor little thing was snorting
+like a steam-engine when she caught it, and kept doubling itself up and
+straightening itself out again, so that altogether, for the first minute
+or two, it was as much as she could do to hold it.
+
+As soon as she had made out the proper way of nursing it, (which was to
+twist it up into a sort of knot, and then keep tight hold of its right
+ear and left foot, so as to prevent its undoing itself,) she carried
+it out into the open air. 'IF I don't take this child away with me,'
+thought Alice, 'they're sure to kill it in a day or two: wouldn't it be
+murder to leave it behind?' She said the last words out loud, and the
+little thing grunted in reply (it had left off sneezing by this time).
+'Don't grunt,' said Alice; 'that's not at all a proper way of expressing
+yourself.'
+
+The baby grunted again, and Alice looked very anxiously into its face to
+see what was the matter with it. There could be no doubt that it had
+a VERY turn-up nose, much more like a snout than a real nose; also its
+eyes were getting extremely small for a baby: altogether Alice did not
+like the look of the thing at all. 'But perhaps it was only sobbing,'
+she thought, and looked into its eyes again, to see if there were any
+tears.
+
+No, there were no tears. 'If you're going to turn into a pig, my dear,'
+said Alice, seriously, 'I'll have nothing more to do with you. Mind
+now!' The poor little thing sobbed again (or grunted, it was impossible
+to say which), and they went on for some while in silence.
+
+Alice was just beginning to think to herself, 'Now, what am I to do with
+this creature when I get it home?' when it grunted again, so violently,
+that she looked down into its face in some alarm. This time there could
+be NO mistake about it: it was neither more nor less than a pig, and she
+felt that it would be quite absurd for her to carry it further.
+
+So she set the little creature down, and felt quite relieved to see
+it trot away quietly into the wood. 'If it had grown up,' she said
+to herself, 'it would have made a dreadfully ugly child: but it makes
+rather a handsome pig, I think.' And she began thinking over other
+children she knew, who might do very well as pigs, and was just saying
+to herself, 'if one only knew the right way to change them--' when she
+was a little startled by seeing the Cheshire Cat sitting on a bough of a
+tree a few yards off.
+
+The Cat only grinned when it saw Alice. It looked good-natured, she
+thought: still it had VERY long claws and a great many teeth, so she
+felt that it ought to be treated with respect.
+
+'Cheshire Puss,' she began, rather timidly, as she did not at all know
+whether it would like the name: however, it only grinned a little wider.
+'Come, it's pleased so far,' thought Alice, and she went on. 'Would you
+tell me, please, which way I ought to go from here?'
+
+'That depends a good deal on where you want to get to,' said the Cat.
+
+'I don't much care where--' said Alice.
+
+'Then it doesn't matter which way you go,' said the Cat.
+
+'--so long as I get SOMEWHERE,' Alice added as an explanation.
+
+'Oh, you're sure to do that,' said the Cat, 'if you only walk long
+enough.'
+
+Alice felt that this could not be denied, so she tried another question.
+'What sort of people live about here?'
+
+'In THAT direction,' the Cat said, waving its right paw round, 'lives
+a Hatter: and in THAT direction,' waving the other paw, 'lives a March
+Hare. Visit either you like: they're both mad.'
+
+'But I don't want to go among mad people,' Alice remarked.
+
+'Oh, you can't help that,' said the Cat: 'we're all mad here. I'm mad.
+You're mad.'
+
+'How do you know I'm mad?' said Alice.
+
+'You must be,' said the Cat, 'or you wouldn't have come here.'
+
+Alice didn't think that proved it at all; however, she went on 'And how
+do you know that you're mad?'
+
+'To begin with,' said the Cat, 'a dog's not mad. You grant that?'
+
+'I suppose so,' said Alice.
+
+'Well, then,' the Cat went on, 'you see, a dog growls when it's angry,
+and wags its tail when it's pleased. Now I growl when I'm pleased, and
+wag my tail when I'm angry. Therefore I'm mad.'
+
+'I call it purring, not growling,' said Alice.
+
+'Call it what you like,' said the Cat. 'Do you play croquet with the
+Queen to-day?'
+
+'I should like it very much,' said Alice, 'but I haven't been invited
+yet.'
+
+'You'll see me there,' said the Cat, and vanished.
+
+Alice was not much surprised at this, she was getting so used to queer
+things happening. While she was looking at the place where it had been,
+it suddenly appeared again.
+
+'By-the-bye, what became of the baby?' said the Cat. 'I'd nearly
+forgotten to ask.'
+
+'It turned into a pig,' Alice quietly said, just as if it had come back
+in a natural way.
+
+'I thought it would,' said the Cat, and vanished again.
+
+Alice waited a little, half expecting to see it again, but it did not
+appear, and after a minute or two she walked on in the direction in
+which the March Hare was said to live. 'I've seen hatters before,' she
+said to herself; 'the March Hare will be much the most interesting, and
+perhaps as this is May it won't be raving mad--at least not so mad as
+it was in March.' As she said this, she looked up, and there was the Cat
+again, sitting on a branch of a tree.
+
+'Did you say pig, or fig?' said the Cat.
+
+'I said pig,' replied Alice; 'and I wish you wouldn't keep appearing and
+vanishing so suddenly: you make one quite giddy.'
+
+'All right,' said the Cat; and this time it vanished quite slowly,
+beginning with the end of the tail, and ending with the grin, which
+remained some time after the rest of it had gone.
+
+'Well! I've often seen a cat without a grin,' thought Alice; 'but a grin
+without a cat! It's the most curious thing I ever saw in my life!'
+
+She had not gone much farther before she came in sight of the house
+of the March Hare: she thought it must be the right house, because the
+chimneys were shaped like ears and the roof was thatched with fur. It
+was so large a house, that she did not like to go nearer till she had
+nibbled some more of the lefthand bit of mushroom, and raised herself to
+about two feet high: even then she walked up towards it rather timidly,
+saying to herself 'Suppose it should be raving mad after all! I almost
+wish I'd gone to see the Hatter instead!'
+
+
+
+
+CHAPTER VII. A Mad Tea-Party
+
+There was a table set out under a tree in front of the house, and the
+March Hare and the Hatter were having tea at it: a Dormouse was sitting
+between them, fast asleep, and the other two were using it as a
+cushion, resting their elbows on it, and talking over its head. 'Very
+uncomfortable for the Dormouse,' thought Alice; 'only, as it's asleep, I
+suppose it doesn't mind.'
+
+The table was a large one, but the three were all crowded together at
+one corner of it: 'No room! No room!' they cried out when they saw Alice
+coming. 'There's PLENTY of room!' said Alice indignantly, and she sat
+down in a large arm-chair at one end of the table.
+
+'Have some wine,' the March Hare said in an encouraging tone.
+
+Alice looked all round the table, but there was nothing on it but tea.
+'I don't see any wine,' she remarked.
+
+'There isn't any,' said the March Hare.
+
+'Then it wasn't very civil of you to offer it,' said Alice angrily.
+
+'It wasn't very civil of you to sit down without being invited,' said
+the March Hare.
+
+'I didn't know it was YOUR table,' said Alice; 'it's laid for a great
+many more than three.'
+
+'Your hair wants cutting,' said the Hatter. He had been looking at Alice
+for some time with great curiosity, and this was his first speech.
+
+'You should learn not to make personal remarks,' Alice said with some
+severity; 'it's very rude.'
+
+The Hatter opened his eyes very wide on hearing this; but all he SAID
+was, 'Why is a raven like a writing-desk?'
+
+'Come, we shall have some fun now!' thought Alice. 'I'm glad they've
+begun asking riddles.--I believe I can guess that,' she added aloud.
+
+'Do you mean that you think you can find out the answer to it?' said the
+March Hare.
+
+'Exactly so,' said Alice.
+
+'Then you should say what you mean,' the March Hare went on.
+
+'I do,' Alice hastily replied; 'at least--at least I mean what I
+say--that's the same thing, you know.'
+
+'Not the same thing a bit!' said the Hatter. 'You might just as well say
+that "I see what I eat" is the same thing as "I eat what I see"!'
+
+'You might just as well say,' added the March Hare, 'that "I like what I
+get" is the same thing as "I get what I like"!'
+
+'You might just as well say,' added the Dormouse, who seemed to be
+talking in his sleep, 'that "I breathe when I sleep" is the same thing
+as "I sleep when I breathe"!'
+
+'It IS the same thing with you,' said the Hatter, and here the
+conversation dropped, and the party sat silent for a minute, while Alice
+thought over all she could remember about ravens and writing-desks,
+which wasn't much.
+
+The Hatter was the first to break the silence. 'What day of the month
+is it?' he said, turning to Alice: he had taken his watch out of his
+pocket, and was looking at it uneasily, shaking it every now and then,
+and holding it to his ear.
+
+Alice considered a little, and then said 'The fourth.'
+
+'Two days wrong!' sighed the Hatter. 'I told you butter wouldn't suit
+the works!' he added looking angrily at the March Hare.
+
+'It was the BEST butter,' the March Hare meekly replied.
+
+'Yes, but some crumbs must have got in as well,' the Hatter grumbled:
+'you shouldn't have put it in with the bread-knife.'
+
+The March Hare took the watch and looked at it gloomily: then he dipped
+it into his cup of tea, and looked at it again: but he could think of
+nothing better to say than his first remark, 'It was the BEST butter,
+you know.'
+
+Alice had been looking over his shoulder with some curiosity. 'What a
+funny watch!' she remarked. 'It tells the day of the month, and doesn't
+tell what o'clock it is!'
+
+'Why should it?' muttered the Hatter. 'Does YOUR watch tell you what
+year it is?'
+
+'Of course not,' Alice replied very readily: 'but that's because it
+stays the same year for such a long time together.'
+
+'Which is just the case with MINE,' said the Hatter.
+
+Alice felt dreadfully puzzled. The Hatter's remark seemed to have no
+sort of meaning in it, and yet it was certainly English. 'I don't quite
+understand you,' she said, as politely as she could.
+
+'The Dormouse is asleep again,' said the Hatter, and he poured a little
+hot tea upon its nose.
+
+The Dormouse shook its head impatiently, and said, without opening its
+eyes, 'Of course, of course; just what I was going to remark myself.'
+
+'Have you guessed the riddle yet?' the Hatter said, turning to Alice
+again.
+
+'No, I give it up,' Alice replied: 'what's the answer?'
+
+'I haven't the slightest idea,' said the Hatter.
+
+'Nor I,' said the March Hare.
+
+Alice sighed wearily. 'I think you might do something better with the
+time,' she said, 'than waste it in asking riddles that have no answers.'
+
+'If you knew Time as well as I do,' said the Hatter, 'you wouldn't talk
+about wasting IT. It's HIM.'
+
+'I don't know what you mean,' said Alice.
+
+'Of course you don't!' the Hatter said, tossing his head contemptuously.
+'I dare say you never even spoke to Time!'
+
+'Perhaps not,' Alice cautiously replied: 'but I know I have to beat time
+when I learn music.'
+
+'Ah! that accounts for it,' said the Hatter. 'He won't stand beating.
+Now, if you only kept on good terms with him, he'd do almost anything
+you liked with the clock. For instance, suppose it were nine o'clock in
+the morning, just time to begin lessons: you'd only have to whisper a
+hint to Time, and round goes the clock in a twinkling! Half-past one,
+time for dinner!'
+
+('I only wish it was,' the March Hare said to itself in a whisper.)
+
+'That would be grand, certainly,' said Alice thoughtfully: 'but then--I
+shouldn't be hungry for it, you know.'
+
+'Not at first, perhaps,' said the Hatter: 'but you could keep it to
+half-past one as long as you liked.'
+
+'Is that the way YOU manage?' Alice asked.
+
+The Hatter shook his head mournfully. 'Not I!' he replied. 'We
+quarrelled last March--just before HE went mad, you know--' (pointing
+with his tea spoon at the March Hare,) '--it was at the great concert
+given by the Queen of Hearts, and I had to sing
+
+     "Twinkle, twinkle, little bat!
+     How I wonder what you're at!"
+
+You know the song, perhaps?'
+
+'I've heard something like it,' said Alice.
+
+'It goes on, you know,' the Hatter continued, 'in this way:--
+
+     "Up above the world you fly,
+     Like a tea-tray in the sky.
+         Twinkle, twinkle--"'
+
+Here the Dormouse shook itself, and began singing in its sleep 'Twinkle,
+twinkle, twinkle, twinkle--' and went on so long that they had to pinch
+it to make it stop.
+
+'Well, I'd hardly finished the first verse,' said the Hatter, 'when the
+Queen jumped up and bawled out, "He's murdering the time! Off with his
+head!"'
+
+'How dreadfully savage!' exclaimed Alice.
+
+'And ever since that,' the Hatter went on in a mournful tone, 'he won't
+do a thing I ask! It's always six o'clock now.'
+
+A bright idea came into Alice's head. 'Is that the reason so many
+tea-things are put out here?' she asked.
+
+'Yes, that's it,' said the Hatter with a sigh: 'it's always tea-time,
+and we've no time to wash the things between whiles.'
+
+'Then you keep moving round, I suppose?' said Alice.
+
+'Exactly so,' said the Hatter: 'as the things get used up.'
+
+'But what happens when you come to the beginning again?' Alice ventured
+to ask.
+
+'Suppose we change the subject,' the March Hare interrupted, yawning.
+'I'm getting tired of this. I vote the young lady tells us a story.'
+
+'I'm afraid I don't know one,' said Alice, rather alarmed at the
+proposal.
+
+'Then the Dormouse shall!' they both cried. 'Wake up, Dormouse!' And
+they pinched it on both sides at once.
+
+The Dormouse slowly opened his eyes. 'I wasn't asleep,' he said in a
+hoarse, feeble voice: 'I heard every word you fellows were saying.'
+
+'Tell us a story!' said the March Hare.
+
+'Yes, please do!' pleaded Alice.
+
+'And be quick about it,' added the Hatter, 'or you'll be asleep again
+before it's done.'
+
+'Once upon a time there were three little sisters,' the Dormouse began
+in a great hurry; 'and their names were Elsie, Lacie, and Tillie; and
+they lived at the bottom of a well--'
+
+'What did they live on?' said Alice, who always took a great interest in
+questions of eating and drinking.
+
+'They lived on treacle,' said the Dormouse, after thinking a minute or
+two.
+
+'They couldn't have done that, you know,' Alice gently remarked; 'they'd
+have been ill.'
+
+'So they were,' said the Dormouse; 'VERY ill.'
+
+Alice tried to fancy to herself what such an extraordinary way of
+living would be like, but it puzzled her too much, so she went on: 'But
+why did they live at the bottom of a well?'
+
+'Take some more tea,' the March Hare said to Alice, very earnestly.
+
+'I've had nothing yet,' Alice replied in an offended tone, 'so I can't
+take more.'
+
+'You mean you can't take LESS,' said the Hatter: 'it's very easy to take
+MORE than nothing.'
+
+'Nobody asked YOUR opinion,' said Alice.
+
+'Who's making personal remarks now?' the Hatter asked triumphantly.
+
+Alice did not quite know what to say to this: so she helped herself
+to some tea and bread-and-butter, and then turned to the Dormouse, and
+repeated her question. 'Why did they live at the bottom of a well?'
+
+The Dormouse again took a minute or two to think about it, and then
+said, 'It was a treacle-well.'
+
+'There's no such thing!' Alice was beginning very angrily, but the
+Hatter and the March Hare went 'Sh! sh!' and the Dormouse sulkily
+remarked, 'If you can't be civil, you'd better finish the story for
+yourself.'
+
+'No, please go on!' Alice said very humbly; 'I won't interrupt again. I
+dare say there may be ONE.'
+
+'One, indeed!' said the Dormouse indignantly. However, he consented to
+go on. 'And so these three little sisters--they were learning to draw,
+you know--'
+
+'What did they draw?' said Alice, quite forgetting her promise.
+
+'Treacle,' said the Dormouse, without considering at all this time.
+
+'I want a clean cup,' interrupted the Hatter: 'let's all move one place
+on.'
+
+He moved on as he spoke, and the Dormouse followed him: the March Hare
+moved into the Dormouse's place, and Alice rather unwillingly took
+the place of the March Hare. The Hatter was the only one who got any
+advantage from the change: and Alice was a good deal worse off than
+before, as the March Hare had just upset the milk-jug into his plate.
+
+Alice did not wish to offend the Dormouse again, so she began very
+cautiously: 'But I don't understand. Where did they draw the treacle
+from?'
+
+'You can draw water out of a water-well,' said the Hatter; 'so I should
+think you could draw treacle out of a treacle-well--eh, stupid?'
+
+'But they were IN the well,' Alice said to the Dormouse, not choosing to
+notice this last remark.
+
+'Of course they were,' said the Dormouse; '--well in.'
+
+This answer so confused poor Alice, that she let the Dormouse go on for
+some time without interrupting it.
+
+'They were learning to draw,' the Dormouse went on, yawning and rubbing
+its eyes, for it was getting very sleepy; 'and they drew all manner of
+things--everything that begins with an M--'
+
+'Why with an M?' said Alice.
+
+'Why not?' said the March Hare.
+
+Alice was silent.
+
+The Dormouse had closed its eyes by this time, and was going off into
+a doze; but, on being pinched by the Hatter, it woke up again with
+a little shriek, and went on: '--that begins with an M, such as
+mouse-traps, and the moon, and memory, and muchness--you know you say
+things are "much of a muchness"--did you ever see such a thing as a
+drawing of a muchness?'
+
+'Really, now you ask me,' said Alice, very much confused, 'I don't
+think--'
+
+'Then you shouldn't talk,' said the Hatter.
+
+This piece of rudeness was more than Alice could bear: she got up in
+great disgust, and walked off; the Dormouse fell asleep instantly, and
+neither of the others took the least notice of her going, though she
+looked back once or twice, half hoping that they would call after her:
+the last time she saw them, they were trying to put the Dormouse into
+the teapot.
+
+'At any rate I'll never go THERE again!' said Alice as she picked her
+way through the wood. 'It's the stupidest tea-party I ever was at in all
+my life!'
+
+Just as she said this, she noticed that one of the trees had a door
+leading right into it. 'That's very curious!' she thought. 'But
+everything's curious today. I think I may as well go in at once.' And in
+she went.
+
+Once more she found herself in the long hall, and close to the little
+glass table. 'Now, I'll manage better this time,' she said to herself,
+and began by taking the little golden key, and unlocking the door that
+led into the garden. Then she went to work nibbling at the mushroom (she
+had kept a piece of it in her pocket) till she was about a foot high:
+then she walked down the little passage: and THEN--she found herself at
+last in the beautiful garden, among the bright flower-beds and the cool
+fountains.
+
+
+
+
+CHAPTER VIII. The Queen's Croquet-Ground
+
+A large rose-tree stood near the entrance of the garden: the roses
+growing on it were white, but there were three gardeners at it, busily
+painting them red. Alice thought this a very curious thing, and she went
+nearer to watch them, and just as she came up to them she heard one of
+them say, 'Look out now, Five! Don't go splashing paint over me like
+that!'
+
+'I couldn't help it,' said Five, in a sulky tone; 'Seven jogged my
+elbow.'
+
+On which Seven looked up and said, 'That's right, Five! Always lay the
+blame on others!'
+
+'YOU'D better not talk!' said Five. 'I heard the Queen say only
+yesterday you deserved to be beheaded!'
+
+'What for?' said the one who had spoken first.
+
+'That's none of YOUR business, Two!' said Seven.
+
+'Yes, it IS his business!' said Five, 'and I'll tell him--it was for
+bringing the cook tulip-roots instead of onions.'
+
+Seven flung down his brush, and had just begun 'Well, of all the unjust
+things--' when his eye chanced to fall upon Alice, as she stood watching
+them, and he checked himself suddenly: the others looked round also, and
+all of them bowed low.
+
+'Would you tell me,' said Alice, a little timidly, 'why you are painting
+those roses?'
+
+Five and Seven said nothing, but looked at Two. Two began in a low
+voice, 'Why the fact is, you see, Miss, this here ought to have been a
+RED rose-tree, and we put a white one in by mistake; and if the Queen
+was to find it out, we should all have our heads cut off, you know.
+So you see, Miss, we're doing our best, afore she comes, to--' At this
+moment Five, who had been anxiously looking across the garden, called
+out 'The Queen! The Queen!' and the three gardeners instantly threw
+themselves flat upon their faces. Th

<TRUNCATED>

[51/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
IGNITE-3916: Created separate module.


Project: http://git-wip-us.apache.org/repos/asf/ignite/repo
Commit: http://git-wip-us.apache.org/repos/asf/ignite/commit/857cdcde
Tree: http://git-wip-us.apache.org/repos/asf/ignite/tree/857cdcde
Diff: http://git-wip-us.apache.org/repos/asf/ignite/diff/857cdcde

Branch: refs/heads/ignite-1.6.8-hadoop
Commit: 857cdcde65f1885ee032a1a85462818c585dfa82
Parents: cb5f9ec
Author: vozerov-gridgain <vo...@gridgain.com>
Authored: Mon Sep 19 13:26:23 2016 +0300
Committer: vozerov-gridgain <vo...@gridgain.com>
Committed: Mon Sep 19 13:26:30 2016 +0300

----------------------------------------------------------------------
 assembly/dependencies-fabric-lgpl.xml           |     1 +
 assembly/dependencies-fabric.xml                |     1 +
 assembly/dependencies-hadoop.xml                |     1 +
 assembly/libs/README.txt                        |     3 +-
 .../ignite/internal/IgniteComponentType.java    |     2 +-
 .../processors/hadoop/HadoopNoopProcessor.java  |     5 +-
 modules/hadoop-impl/README.txt                  |    33 +
 modules/hadoop-impl/config/core-site.ignite.xml |    90 +
 modules/hadoop-impl/config/hive-site.ignite.xml |    37 +
 .../hadoop-impl/config/mapred-site.ignite.xml   |    66 +
 modules/hadoop-impl/licenses/apache-2.0.txt     |   202 +
 modules/hadoop-impl/pom.xml                     |   151 +
 .../hadoop/fs/BasicHadoopFileSystemFactory.java |   275 +
 .../fs/CachingHadoopFileSystemFactory.java      |    85 +
 .../hadoop/fs/HadoopFileSystemFactory.java      |    52 +
 .../fs/IgniteHadoopFileSystemCounterWriter.java |   103 +
 .../fs/IgniteHadoopIgfsSecondaryFileSystem.java |   580 +
 .../fs/KerberosHadoopFileSystemFactory.java     |   217 +
 .../apache/ignite/hadoop/fs/package-info.java   |    22 +
 .../hadoop/fs/v1/IgniteHadoopFileSystem.java    |  1364 ++
 .../ignite/hadoop/fs/v1/package-info.java       |    22 +
 .../hadoop/fs/v2/IgniteHadoopFileSystem.java    |  1076 ++
 .../ignite/hadoop/fs/v2/package-info.java       |    22 +
 .../IgniteHadoopClientProtocolProvider.java     |   144 +
 .../ignite/hadoop/mapreduce/package-info.java   |    22 +
 .../processors/hadoop/HadoopAttributes.java     |   168 +
 .../processors/hadoop/HadoopComponent.java      |    62 +
 .../processors/hadoop/HadoopContext.java        |   201 +
 .../processors/hadoop/HadoopDefaultJobInfo.java |   156 +
 .../internal/processors/hadoop/HadoopImpl.java  |   134 +
 .../hadoop/HadoopMapReduceCounterGroup.java     |   123 +
 .../hadoop/HadoopMapReduceCounters.java         |   228 +
 .../processors/hadoop/HadoopProcessor.java      |   223 +
 .../internal/processors/hadoop/HadoopSetup.java |   542 +
 .../hadoop/HadoopTaskCancelledException.java    |    35 +
 .../internal/processors/hadoop/HadoopUtils.java |   368 +
 .../hadoop/counter/HadoopCounterAdapter.java    |   129 +
 .../hadoop/counter/HadoopCountersImpl.java      |   200 +
 .../hadoop/counter/HadoopLongCounter.java       |    93 +
 .../counter/HadoopPerformanceCounter.java       |   288 +
 .../hadoop/fs/HadoopFileSystemCacheUtils.java   |   242 +
 .../hadoop/fs/HadoopFileSystemsUtils.java       |    51 +
 .../hadoop/fs/HadoopLazyConcurrentMap.java      |   212 +
 .../hadoop/fs/HadoopLocalFileSystemV1.java      |    39 +
 .../hadoop/fs/HadoopLocalFileSystemV2.java      |    88 +
 .../processors/hadoop/fs/HadoopParameters.java  |    94 +
 .../hadoop/fs/HadoopRawLocalFileSystem.java     |   314 +
 .../processors/hadoop/igfs/HadoopIgfs.java      |   202 +
 .../igfs/HadoopIgfsCommunicationException.java  |    57 +
 .../processors/hadoop/igfs/HadoopIgfsEx.java    |    93 +
 .../hadoop/igfs/HadoopIgfsFuture.java           |    97 +
 .../hadoop/igfs/HadoopIgfsInProc.java           |   510 +
 .../hadoop/igfs/HadoopIgfsInputStream.java      |   629 +
 .../processors/hadoop/igfs/HadoopIgfsIo.java    |    76 +
 .../processors/hadoop/igfs/HadoopIgfsIpcIo.java |   624 +
 .../hadoop/igfs/HadoopIgfsIpcIoListener.java    |    36 +
 .../hadoop/igfs/HadoopIgfsJclLogger.java        |   116 +
 .../hadoop/igfs/HadoopIgfsOutProc.java          |   524 +
 .../hadoop/igfs/HadoopIgfsOutputStream.java     |   201 +
 .../hadoop/igfs/HadoopIgfsProperties.java       |    86 +
 .../hadoop/igfs/HadoopIgfsProxyInputStream.java |   337 +
 .../igfs/HadoopIgfsProxyOutputStream.java       |   165 +
 ...fsSecondaryFileSystemPositionedReadable.java |   105 +
 .../hadoop/igfs/HadoopIgfsStreamDelegate.java   |    96 +
 .../igfs/HadoopIgfsStreamEventListener.java     |    39 +
 .../processors/hadoop/igfs/HadoopIgfsUtils.java |   174 +
 .../hadoop/igfs/HadoopIgfsWrapper.java          |   552 +
 .../hadoop/jobtracker/HadoopJobMetadata.java    |   316 +
 .../hadoop/jobtracker/HadoopJobTracker.java     |  1706 +++
 .../hadoop/message/HadoopMessage.java           |    27 +
 .../hadoop/proto/HadoopClientProtocol.java      |   349 +
 .../proto/HadoopProtocolJobCountersTask.java    |    46 +
 .../proto/HadoopProtocolJobStatusTask.java      |    82 +
 .../hadoop/proto/HadoopProtocolKillJobTask.java |    46 +
 .../proto/HadoopProtocolNextTaskIdTask.java     |    36 +
 .../proto/HadoopProtocolSubmitJobTask.java      |    59 +
 .../hadoop/proto/HadoopProtocolTaskAdapter.java |   120 +
 .../proto/HadoopProtocolTaskArguments.java      |    84 +
 .../hadoop/shuffle/HadoopShuffle.java           |   263 +
 .../hadoop/shuffle/HadoopShuffleAck.java        |    92 +
 .../hadoop/shuffle/HadoopShuffleJob.java        |   612 +
 .../hadoop/shuffle/HadoopShuffleMessage.java    |   242 +
 .../HadoopConcurrentHashMultimap.java           |   616 +
 .../shuffle/collections/HadoopHashMultimap.java |   176 +
 .../collections/HadoopHashMultimapBase.java     |   211 +
 .../shuffle/collections/HadoopMultimap.java     |   113 +
 .../shuffle/collections/HadoopMultimapBase.java |   435 +
 .../shuffle/collections/HadoopSkipList.java     |   733 +
 .../shuffle/streams/HadoopDataInStream.java     |   171 +
 .../shuffle/streams/HadoopDataOutStream.java    |   130 +
 .../shuffle/streams/HadoopOffheapBuffer.java    |   122 +
 .../HadoopEmbeddedTaskExecutor.java             |   153 +
 .../taskexecutor/HadoopExecutorService.java     |   234 +
 .../hadoop/taskexecutor/HadoopRunnableTask.java |   293 +
 .../taskexecutor/HadoopTaskExecutorAdapter.java |    59 +
 .../hadoop/taskexecutor/HadoopTaskState.java    |    38 +
 .../hadoop/taskexecutor/HadoopTaskStatus.java   |   116 +
 .../external/HadoopExternalTaskExecutor.java    |   976 ++
 .../external/HadoopExternalTaskMetadata.java    |    67 +
 .../external/HadoopJobInfoUpdateRequest.java    |   113 +
 .../external/HadoopPrepareForJobRequest.java    |   130 +
 .../external/HadoopProcessDescriptor.java       |   149 +
 .../external/HadoopProcessStartedAck.java       |    47 +
 .../external/HadoopTaskExecutionRequest.java    |   114 +
 .../external/HadoopTaskFinishedMessage.java     |    94 +
 .../child/HadoopChildProcessRunner.java         |   459 +
 .../child/HadoopExternalProcessStarter.java     |   301 +
 .../HadoopAbstractCommunicationClient.java      |    96 +
 .../HadoopCommunicationClient.java              |    72 +
 .../HadoopExternalCommunication.java            |  1460 ++
 .../HadoopHandshakeTimeoutException.java        |    42 +
 .../communication/HadoopIpcToNioAdapter.java    |   248 +
 .../communication/HadoopMarshallerFilter.java   |    86 +
 .../communication/HadoopMessageListener.java    |    39 +
 .../HadoopTcpNioCommunicationClient.java        |    93 +
 .../hadoop/v1/HadoopV1CleanupTask.java          |    64 +
 .../processors/hadoop/v1/HadoopV1Counter.java   |   106 +
 .../processors/hadoop/v1/HadoopV1MapTask.java   |   122 +
 .../hadoop/v1/HadoopV1OutputCollector.java      |   137 +
 .../hadoop/v1/HadoopV1Partitioner.java          |    44 +
 .../hadoop/v1/HadoopV1ReduceTask.java           |   101 +
 .../processors/hadoop/v1/HadoopV1Reporter.java  |    81 +
 .../processors/hadoop/v1/HadoopV1SetupTask.java |    56 +
 .../processors/hadoop/v1/HadoopV1Splitter.java  |   102 +
 .../processors/hadoop/v1/HadoopV1Task.java      |    97 +
 .../processors/hadoop/v2/HadoopDaemon.java      |   126 +
 .../hadoop/v2/HadoopExternalSplit.java          |    89 +
 .../hadoop/v2/HadoopSerializationWrapper.java   |   138 +
 .../hadoop/v2/HadoopShutdownHookManager.java    |    98 +
 .../hadoop/v2/HadoopSplitWrapper.java           |   119 +
 .../hadoop/v2/HadoopV2CleanupTask.java          |    72 +
 .../processors/hadoop/v2/HadoopV2Context.java   |   243 +
 .../processors/hadoop/v2/HadoopV2Counter.java   |    88 +
 .../processors/hadoop/v2/HadoopV2Job.java       |   445 +
 .../hadoop/v2/HadoopV2JobResourceManager.java   |   323 +
 .../processors/hadoop/v2/HadoopV2MapTask.java   |    99 +
 .../hadoop/v2/HadoopV2Partitioner.java          |    44 +
 .../hadoop/v2/HadoopV2ReduceTask.java           |    91 +
 .../processors/hadoop/v2/HadoopV2SetupTask.java |    65 +
 .../processors/hadoop/v2/HadoopV2Splitter.java  |   111 +
 .../processors/hadoop/v2/HadoopV2Task.java      |   185 +
 .../hadoop/v2/HadoopV2TaskContext.java          |   560 +
 .../hadoop/v2/HadoopWritableSerialization.java  |    75 +
 ...op.mapreduce.protocol.ClientProtocolProvider |     1 +
 .../HadoopClientProtocolEmbeddedSelfTest.java   |    35 +
 .../hadoop/HadoopClientProtocolSelfTest.java    |   654 +
 .../hadoop/cache/HadoopTxConfigCacheTest.java   |    42 +
 ...KerberosHadoopFileSystemFactorySelfTest.java |   121 +
 .../util/BasicUserNameMapperSelfTest.java       |   133 +
 .../util/ChainedUserNameMapperSelfTest.java     |   107 +
 .../util/KerberosUserNameMapperSelfTest.java    |    99 +
 .../ignite/igfs/Hadoop1DualAbstractTest.java    |   158 +
 .../igfs/Hadoop1OverIgfsDualAsyncTest.java      |    30 +
 .../igfs/Hadoop1OverIgfsDualSyncTest.java       |    30 +
 .../igfs/HadoopFIleSystemFactorySelfTest.java   |   317 +
 .../HadoopIgfs20FileSystemAbstractSelfTest.java |  2040 +++
 ...Igfs20FileSystemLoopbackPrimarySelfTest.java |    74 +
 ...oopIgfs20FileSystemShmemPrimarySelfTest.java |    74 +
 .../igfs/HadoopIgfsDualAbstractSelfTest.java    |   321 +
 .../igfs/HadoopIgfsDualAsyncSelfTest.java       |    32 +
 .../ignite/igfs/HadoopIgfsDualSyncSelfTest.java |    32 +
 ...adoopIgfsSecondaryFileSystemTestAdapter.java |   149 +
 ...oopSecondaryFileSystemConfigurationTest.java |   575 +
 .../apache/ignite/igfs/IgfsEventsTestSuite.java |   285 +
 .../igfs/IgfsNearOnlyMultiNodeSelfTest.java     |   223 +
 .../IgniteHadoopFileSystemAbstractSelfTest.java |  2432 +++
 .../IgniteHadoopFileSystemClientSelfTest.java   |   212 +
 ...IgniteHadoopFileSystemHandshakeSelfTest.java |   389 +
 .../IgniteHadoopFileSystemIpcCacheSelfTest.java |   214 +
 .../IgniteHadoopFileSystemLoggerSelfTest.java   |   298 +
 ...niteHadoopFileSystemLoggerStateSelfTest.java |   329 +
 ...adoopFileSystemLoopbackAbstractSelfTest.java |    46 +
 ...SystemLoopbackEmbeddedDualAsyncSelfTest.java |    33 +
 ...eSystemLoopbackEmbeddedDualSyncSelfTest.java |    33 +
 ...leSystemLoopbackEmbeddedPrimarySelfTest.java |    33 +
 ...SystemLoopbackEmbeddedSecondarySelfTest.java |    34 +
 ...SystemLoopbackExternalDualAsyncSelfTest.java |    33 +
 ...eSystemLoopbackExternalDualSyncSelfTest.java |    33 +
 ...leSystemLoopbackExternalPrimarySelfTest.java |    33 +
 ...SystemLoopbackExternalSecondarySelfTest.java |    34 +
 ...condaryFileSystemInitializationSelfTest.java |   214 +
 ...teHadoopFileSystemShmemAbstractSelfTest.java |    91 +
 ...ileSystemShmemEmbeddedDualAsyncSelfTest.java |    33 +
 ...FileSystemShmemEmbeddedDualSyncSelfTest.java |    33 +
 ...pFileSystemShmemEmbeddedPrimarySelfTest.java |    33 +
 ...ileSystemShmemEmbeddedSecondarySelfTest.java |    33 +
 ...ileSystemShmemExternalDualAsyncSelfTest.java |    33 +
 ...FileSystemShmemExternalDualSyncSelfTest.java |    33 +
 ...pFileSystemShmemExternalPrimarySelfTest.java |    33 +
 ...ileSystemShmemExternalSecondarySelfTest.java |    33 +
 .../hadoop/HadoopAbstractMapReduceTest.java     |   429 +
 .../hadoop/HadoopAbstractSelfTest.java          |   239 +
 .../hadoop/HadoopAbstractWordCountTest.java     |   175 +
 .../hadoop/HadoopClassLoaderTest.java           |   110 +
 .../hadoop/HadoopCommandLineTest.java           |   474 +
 .../HadoopDefaultMapReducePlannerSelfTest.java  |   615 +
 .../processors/hadoop/HadoopErrorSimulator.java |   326 +
 .../hadoop/HadoopFileSystemsTest.java           |   155 +
 .../processors/hadoop/HadoopGroupingTest.java   |   307 +
 .../hadoop/HadoopJobTrackerSelfTest.java        |   345 +
 .../hadoop/HadoopMapReduceEmbeddedSelfTest.java |   253 +
 .../HadoopMapReduceErrorResilienceTest.java     |   154 +
 .../processors/hadoop/HadoopMapReduceTest.java  |    66 +
 .../hadoop/HadoopNoHadoopMapReduceTest.java     |    47 +
 .../processors/hadoop/HadoopPlannerMockJob.java |   168 +
 .../hadoop/HadoopPopularWordsTest.java          |   298 +
 .../HadoopSerializationWrapperSelfTest.java     |    79 +
 .../processors/hadoop/HadoopSharedMap.java      |    66 +
 .../hadoop/HadoopSnappyFullMapReduceTest.java   |    36 +
 .../processors/hadoop/HadoopSnappyTest.java     |   102 +
 .../hadoop/HadoopSortingExternalTest.java       |    46 +
 .../processors/hadoop/HadoopSortingTest.java    |   303 +
 .../hadoop/HadoopSplitWrapperSelfTest.java      |    72 +
 .../processors/hadoop/HadoopStartup.java        |    54 +
 .../hadoop/HadoopTaskExecutionSelfTest.java     |   567 +
 .../hadoop/HadoopTasksAllVersionsTest.java      |   260 +
 .../processors/hadoop/HadoopTasksV1Test.java    |    58 +
 .../processors/hadoop/HadoopTasksV2Test.java    |    77 +
 .../hadoop/HadoopTestRoundRobinMrPlanner.java   |    71 +
 .../hadoop/HadoopTestTaskContext.java           |   228 +
 .../processors/hadoop/HadoopTestUtils.java      |   178 +
 .../hadoop/HadoopUserLibsSelfTest.java          |   260 +
 .../processors/hadoop/HadoopV2JobSelfTest.java  |   100 +
 .../hadoop/HadoopValidationSelfTest.java        |    53 +
 .../HadoopWeightedMapReducePlannerTest.java     |   599 +
 .../HadoopWeightedPlannerMapReduceTest.java     |    38 +
 .../hadoop/books/alice-in-wonderland.txt        |  3735 +++++
 .../processors/hadoop/books/art-of-war.txt      |  6982 +++++++++
 .../hadoop/books/huckleberry-finn.txt           | 11733 +++++++++++++++
 .../processors/hadoop/books/sherlock-holmes.txt | 13052 +++++++++++++++++
 .../processors/hadoop/books/tom-sawyer.txt      |  8858 +++++++++++
 .../hadoop/deps/CircularWIthHadoop.java         |    32 +
 .../hadoop/deps/CircularWithoutHadoop.java      |    27 +
 .../processors/hadoop/deps/WithCast.java        |    41 +
 .../hadoop/deps/WithClassAnnotation.java        |    28 +
 .../hadoop/deps/WithConstructorInvocation.java  |    31 +
 .../processors/hadoop/deps/WithExtends.java     |    27 +
 .../processors/hadoop/deps/WithField.java       |    29 +
 .../processors/hadoop/deps/WithImplements.java  |    36 +
 .../hadoop/deps/WithIndirectField.java          |    27 +
 .../processors/hadoop/deps/WithInitializer.java |    33 +
 .../processors/hadoop/deps/WithInnerClass.java  |    31 +
 .../hadoop/deps/WithLocalVariable.java          |    38 +
 .../hadoop/deps/WithMethodAnnotation.java       |    32 +
 .../hadoop/deps/WithMethodArgument.java         |    31 +
 .../hadoop/deps/WithMethodCheckedException.java |    31 +
 .../hadoop/deps/WithMethodInvocation.java       |    31 +
 .../hadoop/deps/WithMethodReturnType.java       |    31 +
 .../hadoop/deps/WithMethodRuntimeException.java |    31 +
 .../processors/hadoop/deps/WithOuterClass.java  |    38 +
 .../hadoop/deps/WithParameterAnnotation.java    |    31 +
 .../processors/hadoop/deps/WithStaticField.java |    29 +
 .../hadoop/deps/WithStaticInitializer.java      |    34 +
 .../processors/hadoop/deps/Without.java         |    25 +
 .../hadoop/examples/HadoopWordCount1.java       |    94 +
 .../hadoop/examples/HadoopWordCount1Map.java    |    79 +
 .../hadoop/examples/HadoopWordCount1Reduce.java |    61 +
 .../hadoop/examples/HadoopWordCount2.java       |   111 +
 .../examples/HadoopWordCount2Combiner.java      |    45 +
 .../hadoop/examples/HadoopWordCount2Mapper.java |    88 +
 .../examples/HadoopWordCount2Reducer.java       |   113 +
 .../collections/HadoopAbstractMapTest.java      |   174 +
 .../HadoopConcurrentHashMultimapSelftest.java   |   278 +
 .../collections/HadoopHashMapSelfTest.java      |   131 +
 .../collections/HadoopSkipListSelfTest.java     |   318 +
 .../streams/HadoopDataStreamSelfTest.java       |   150 +
 .../taskexecutor/HadoopExecutorServiceTest.java |   118 +
 .../HadoopExternalTaskExecutionSelfTest.java    |   232 +
 .../HadoopExternalCommunicationSelfTest.java    |   220 +
 .../testsuites/IgniteHadoopTestSuite.java       |   354 +
 .../IgniteIgfsLinuxAndMacOSTestSuite.java       |    72 +
 modules/hadoop/pom.xml                          |    36 -
 .../hadoop/fs/BasicHadoopFileSystemFactory.java |   275 -
 .../fs/CachingHadoopFileSystemFactory.java      |    85 -
 .../hadoop/fs/HadoopFileSystemFactory.java      |    52 -
 .../fs/IgniteHadoopFileSystemCounterWriter.java |   103 -
 .../fs/IgniteHadoopIgfsSecondaryFileSystem.java |   580 -
 .../fs/KerberosHadoopFileSystemFactory.java     |   217 -
 .../apache/ignite/hadoop/fs/package-info.java   |    22 -
 .../hadoop/fs/v1/IgniteHadoopFileSystem.java    |  1364 --
 .../ignite/hadoop/fs/v1/package-info.java       |    22 -
 .../hadoop/fs/v2/IgniteHadoopFileSystem.java    |  1076 --
 .../ignite/hadoop/fs/v2/package-info.java       |    22 -
 .../IgniteHadoopClientProtocolProvider.java     |   144 -
 .../mapreduce/IgniteHadoopMapReducePlanner.java |    22 +-
 .../IgniteHadoopWeightedMapReducePlanner.java   |     4 +-
 .../ignite/hadoop/mapreduce/package-info.java   |    22 -
 .../ignite/hadoop/util/UserNameMapper.java      |     4 +-
 .../processors/hadoop/HadoopAttributes.java     |   168 -
 .../processors/hadoop/HadoopCommonUtils.java    |   110 +
 .../processors/hadoop/HadoopComponent.java      |    62 -
 .../processors/hadoop/HadoopContext.java        |   201 -
 .../processors/hadoop/HadoopDefaultJobInfo.java |   156 -
 .../internal/processors/hadoop/HadoopImpl.java  |   134 -
 .../hadoop/HadoopMapReduceCounterGroup.java     |   123 -
 .../hadoop/HadoopMapReduceCounters.java         |   228 -
 .../processors/hadoop/HadoopProcessor.java      |   223 -
 .../internal/processors/hadoop/HadoopSetup.java |   541 -
 .../hadoop/HadoopTaskCancelledException.java    |    35 -
 .../internal/processors/hadoop/HadoopUtils.java |   443 -
 .../hadoop/counter/HadoopCounterAdapter.java    |   129 -
 .../hadoop/counter/HadoopCountersImpl.java      |   200 -
 .../hadoop/counter/HadoopLongCounter.java       |    93 -
 .../counter/HadoopPerformanceCounter.java       |   288 -
 .../hadoop/fs/HadoopFileSystemCacheUtils.java   |   242 -
 .../hadoop/fs/HadoopFileSystemsUtils.java       |    51 -
 .../hadoop/fs/HadoopLazyConcurrentMap.java      |   212 -
 .../hadoop/fs/HadoopLocalFileSystemV1.java      |    39 -
 .../hadoop/fs/HadoopLocalFileSystemV2.java      |    88 -
 .../processors/hadoop/fs/HadoopParameters.java  |    94 -
 .../hadoop/fs/HadoopRawLocalFileSystem.java     |   314 -
 .../processors/hadoop/igfs/HadoopIgfs.java      |   202 -
 .../igfs/HadoopIgfsCommunicationException.java  |    57 -
 .../processors/hadoop/igfs/HadoopIgfsEx.java    |    93 -
 .../hadoop/igfs/HadoopIgfsFuture.java           |    97 -
 .../hadoop/igfs/HadoopIgfsInProc.java           |   510 -
 .../hadoop/igfs/HadoopIgfsInputStream.java      |   629 -
 .../processors/hadoop/igfs/HadoopIgfsIo.java    |    76 -
 .../processors/hadoop/igfs/HadoopIgfsIpcIo.java |   624 -
 .../hadoop/igfs/HadoopIgfsIpcIoListener.java    |    36 -
 .../hadoop/igfs/HadoopIgfsJclLogger.java        |   116 -
 .../hadoop/igfs/HadoopIgfsOutProc.java          |   524 -
 .../hadoop/igfs/HadoopIgfsOutputStream.java     |   201 -
 .../hadoop/igfs/HadoopIgfsProperties.java       |    86 -
 .../hadoop/igfs/HadoopIgfsProxyInputStream.java |   337 -
 .../igfs/HadoopIgfsProxyOutputStream.java       |   165 -
 ...fsSecondaryFileSystemPositionedReadable.java |   105 -
 .../hadoop/igfs/HadoopIgfsStreamDelegate.java   |    96 -
 .../igfs/HadoopIgfsStreamEventListener.java     |    39 -
 .../processors/hadoop/igfs/HadoopIgfsUtils.java |   174 -
 .../hadoop/igfs/HadoopIgfsWrapper.java          |   552 -
 .../hadoop/jobtracker/HadoopJobMetadata.java    |   316 -
 .../hadoop/jobtracker/HadoopJobTracker.java     |  1706 ---
 .../hadoop/message/HadoopMessage.java           |    27 -
 .../planner/HadoopDefaultMapReducePlan.java     |     7 +-
 .../hadoop/proto/HadoopClientProtocol.java      |   349 -
 .../proto/HadoopProtocolJobCountersTask.java    |    46 -
 .../proto/HadoopProtocolJobStatusTask.java      |    82 -
 .../hadoop/proto/HadoopProtocolKillJobTask.java |    46 -
 .../proto/HadoopProtocolNextTaskIdTask.java     |    36 -
 .../proto/HadoopProtocolSubmitJobTask.java      |    59 -
 .../hadoop/proto/HadoopProtocolTaskAdapter.java |   120 -
 .../proto/HadoopProtocolTaskArguments.java      |    84 -
 .../hadoop/shuffle/HadoopShuffle.java           |   263 -
 .../hadoop/shuffle/HadoopShuffleAck.java        |    92 -
 .../hadoop/shuffle/HadoopShuffleJob.java        |   612 -
 .../hadoop/shuffle/HadoopShuffleMessage.java    |   242 -
 .../HadoopConcurrentHashMultimap.java           |   616 -
 .../shuffle/collections/HadoopHashMultimap.java |   176 -
 .../collections/HadoopHashMultimapBase.java     |   211 -
 .../shuffle/collections/HadoopMultimap.java     |   113 -
 .../shuffle/collections/HadoopMultimapBase.java |   435 -
 .../shuffle/collections/HadoopSkipList.java     |   733 -
 .../shuffle/streams/HadoopDataInStream.java     |   171 -
 .../shuffle/streams/HadoopDataOutStream.java    |   130 -
 .../shuffle/streams/HadoopOffheapBuffer.java    |   122 -
 .../HadoopEmbeddedTaskExecutor.java             |   153 -
 .../taskexecutor/HadoopExecutorService.java     |   234 -
 .../hadoop/taskexecutor/HadoopRunnableTask.java |   293 -
 .../taskexecutor/HadoopTaskExecutorAdapter.java |    59 -
 .../hadoop/taskexecutor/HadoopTaskState.java    |    38 -
 .../hadoop/taskexecutor/HadoopTaskStatus.java   |   116 -
 .../external/HadoopExternalTaskExecutor.java    |   976 --
 .../external/HadoopExternalTaskMetadata.java    |    67 -
 .../external/HadoopJobInfoUpdateRequest.java    |   113 -
 .../external/HadoopPrepareForJobRequest.java    |   130 -
 .../external/HadoopProcessDescriptor.java       |   149 -
 .../external/HadoopProcessStartedAck.java       |    47 -
 .../external/HadoopTaskExecutionRequest.java    |   114 -
 .../external/HadoopTaskFinishedMessage.java     |    94 -
 .../child/HadoopChildProcessRunner.java         |   459 -
 .../child/HadoopExternalProcessStarter.java     |   301 -
 .../HadoopAbstractCommunicationClient.java      |    96 -
 .../HadoopCommunicationClient.java              |    72 -
 .../HadoopExternalCommunication.java            |  1460 --
 .../HadoopHandshakeTimeoutException.java        |    42 -
 .../communication/HadoopIpcToNioAdapter.java    |   248 -
 .../communication/HadoopMarshallerFilter.java   |    86 -
 .../communication/HadoopMessageListener.java    |    39 -
 .../HadoopTcpNioCommunicationClient.java        |    93 -
 .../hadoop/v1/HadoopV1CleanupTask.java          |    64 -
 .../processors/hadoop/v1/HadoopV1Counter.java   |   106 -
 .../processors/hadoop/v1/HadoopV1MapTask.java   |   122 -
 .../hadoop/v1/HadoopV1OutputCollector.java      |   137 -
 .../hadoop/v1/HadoopV1Partitioner.java          |    44 -
 .../hadoop/v1/HadoopV1ReduceTask.java           |   101 -
 .../processors/hadoop/v1/HadoopV1Reporter.java  |    81 -
 .../processors/hadoop/v1/HadoopV1SetupTask.java |    56 -
 .../processors/hadoop/v1/HadoopV1Splitter.java  |   102 -
 .../processors/hadoop/v1/HadoopV1Task.java      |    97 -
 .../processors/hadoop/v2/HadoopDaemon.java      |   126 -
 .../hadoop/v2/HadoopExternalSplit.java          |    89 -
 .../hadoop/v2/HadoopSerializationWrapper.java   |   138 -
 .../hadoop/v2/HadoopShutdownHookManager.java    |    98 -
 .../hadoop/v2/HadoopSplitWrapper.java           |   119 -
 .../hadoop/v2/HadoopV2CleanupTask.java          |    72 -
 .../processors/hadoop/v2/HadoopV2Context.java   |   243 -
 .../processors/hadoop/v2/HadoopV2Counter.java   |    88 -
 .../processors/hadoop/v2/HadoopV2Job.java       |   445 -
 .../hadoop/v2/HadoopV2JobResourceManager.java   |   323 -
 .../processors/hadoop/v2/HadoopV2MapTask.java   |    99 -
 .../hadoop/v2/HadoopV2Partitioner.java          |    44 -
 .../hadoop/v2/HadoopV2ReduceTask.java           |    91 -
 .../processors/hadoop/v2/HadoopV2SetupTask.java |    65 -
 .../processors/hadoop/v2/HadoopV2Splitter.java  |   111 -
 .../processors/hadoop/v2/HadoopV2Task.java      |   185 -
 .../hadoop/v2/HadoopV2TaskContext.java          |   560 -
 .../hadoop/v2/HadoopWritableSerialization.java  |    75 -
 ...op.mapreduce.protocol.ClientProtocolProvider |     1 -
 .../HadoopClientProtocolEmbeddedSelfTest.java   |    35 -
 .../hadoop/HadoopClientProtocolSelfTest.java    |   654 -
 .../hadoop/cache/HadoopTxConfigCacheTest.java   |    42 -
 ...KerberosHadoopFileSystemFactorySelfTest.java |   121 -
 .../util/BasicUserNameMapperSelfTest.java       |   133 -
 .../util/ChainedUserNameMapperSelfTest.java     |   107 -
 .../util/KerberosUserNameMapperSelfTest.java    |    99 -
 .../ignite/igfs/Hadoop1DualAbstractTest.java    |   158 -
 .../igfs/Hadoop1OverIgfsDualAsyncTest.java      |    30 -
 .../igfs/Hadoop1OverIgfsDualSyncTest.java       |    30 -
 .../igfs/HadoopFIleSystemFactorySelfTest.java   |   317 -
 .../HadoopIgfs20FileSystemAbstractSelfTest.java |  2040 ---
 ...Igfs20FileSystemLoopbackPrimarySelfTest.java |    74 -
 ...oopIgfs20FileSystemShmemPrimarySelfTest.java |    74 -
 .../igfs/HadoopIgfsDualAbstractSelfTest.java    |   321 -
 .../igfs/HadoopIgfsDualAsyncSelfTest.java       |    32 -
 .../ignite/igfs/HadoopIgfsDualSyncSelfTest.java |    32 -
 ...adoopIgfsSecondaryFileSystemTestAdapter.java |   149 -
 ...oopSecondaryFileSystemConfigurationTest.java |   575 -
 .../apache/ignite/igfs/IgfsEventsTestSuite.java |   285 -
 .../igfs/IgfsNearOnlyMultiNodeSelfTest.java     |   223 -
 .../IgniteHadoopFileSystemAbstractSelfTest.java |  2432 ---
 .../IgniteHadoopFileSystemClientSelfTest.java   |   212 -
 ...IgniteHadoopFileSystemHandshakeSelfTest.java |   389 -
 .../IgniteHadoopFileSystemIpcCacheSelfTest.java |   214 -
 .../IgniteHadoopFileSystemLoggerSelfTest.java   |   298 -
 ...niteHadoopFileSystemLoggerStateSelfTest.java |   329 -
 ...adoopFileSystemLoopbackAbstractSelfTest.java |    46 -
 ...SystemLoopbackEmbeddedDualAsyncSelfTest.java |    33 -
 ...eSystemLoopbackEmbeddedDualSyncSelfTest.java |    33 -
 ...leSystemLoopbackEmbeddedPrimarySelfTest.java |    33 -
 ...SystemLoopbackEmbeddedSecondarySelfTest.java |    34 -
 ...SystemLoopbackExternalDualAsyncSelfTest.java |    33 -
 ...eSystemLoopbackExternalDualSyncSelfTest.java |    33 -
 ...leSystemLoopbackExternalPrimarySelfTest.java |    33 -
 ...SystemLoopbackExternalSecondarySelfTest.java |    34 -
 ...condaryFileSystemInitializationSelfTest.java |   214 -
 ...teHadoopFileSystemShmemAbstractSelfTest.java |    91 -
 ...ileSystemShmemEmbeddedDualAsyncSelfTest.java |    33 -
 ...FileSystemShmemEmbeddedDualSyncSelfTest.java |    33 -
 ...pFileSystemShmemEmbeddedPrimarySelfTest.java |    33 -
 ...ileSystemShmemEmbeddedSecondarySelfTest.java |    33 -
 ...ileSystemShmemExternalDualAsyncSelfTest.java |    33 -
 ...FileSystemShmemExternalDualSyncSelfTest.java |    33 -
 ...pFileSystemShmemExternalPrimarySelfTest.java |    33 -
 ...ileSystemShmemExternalSecondarySelfTest.java |    33 -
 .../hadoop/HadoopAbstractMapReduceTest.java     |   429 -
 .../hadoop/HadoopAbstractSelfTest.java          |   239 -
 .../hadoop/HadoopAbstractWordCountTest.java     |   175 -
 .../hadoop/HadoopClassLoaderTest.java           |   110 -
 .../hadoop/HadoopCommandLineTest.java           |   474 -
 .../HadoopDefaultMapReducePlannerSelfTest.java  |   615 -
 .../processors/hadoop/HadoopErrorSimulator.java |   326 -
 .../hadoop/HadoopFileSystemsTest.java           |   155 -
 .../processors/hadoop/HadoopGroupingTest.java   |   307 -
 .../hadoop/HadoopJobTrackerSelfTest.java        |   345 -
 .../hadoop/HadoopMapReduceEmbeddedSelfTest.java |   253 -
 .../HadoopMapReduceErrorResilienceTest.java     |   154 -
 .../processors/hadoop/HadoopMapReduceTest.java  |    66 -
 .../hadoop/HadoopNoHadoopMapReduceTest.java     |    47 -
 .../processors/hadoop/HadoopPlannerMockJob.java |   168 -
 .../hadoop/HadoopPopularWordsTest.java          |   298 -
 .../HadoopSerializationWrapperSelfTest.java     |    79 -
 .../processors/hadoop/HadoopSharedMap.java      |    66 -
 .../hadoop/HadoopSnappyFullMapReduceTest.java   |    36 -
 .../processors/hadoop/HadoopSnappyTest.java     |   102 -
 .../hadoop/HadoopSortingExternalTest.java       |    46 -
 .../processors/hadoop/HadoopSortingTest.java    |   303 -
 .../hadoop/HadoopSplitWrapperSelfTest.java      |    72 -
 .../processors/hadoop/HadoopStartup.java        |    54 -
 .../hadoop/HadoopTaskExecutionSelfTest.java     |   567 -
 .../hadoop/HadoopTasksAllVersionsTest.java      |   260 -
 .../processors/hadoop/HadoopTasksV1Test.java    |    58 -
 .../processors/hadoop/HadoopTasksV2Test.java    |    77 -
 .../hadoop/HadoopTestRoundRobinMrPlanner.java   |    71 -
 .../hadoop/HadoopTestTaskContext.java           |   228 -
 .../processors/hadoop/HadoopTestUtils.java      |   178 -
 .../hadoop/HadoopUserLibsSelfTest.java          |   260 -
 .../processors/hadoop/HadoopV2JobSelfTest.java  |   100 -
 .../hadoop/HadoopValidationSelfTest.java        |    53 -
 .../HadoopWeightedMapReducePlannerTest.java     |   599 -
 .../HadoopWeightedPlannerMapReduceTest.java     |    38 -
 .../hadoop/books/alice-in-wonderland.txt        |  3735 -----
 .../processors/hadoop/books/art-of-war.txt      |  6982 ---------
 .../hadoop/books/huckleberry-finn.txt           | 11733 ---------------
 .../processors/hadoop/books/sherlock-holmes.txt | 13052 -----------------
 .../processors/hadoop/books/tom-sawyer.txt      |  8858 -----------
 .../hadoop/deps/CircularWIthHadoop.java         |    32 -
 .../hadoop/deps/CircularWithoutHadoop.java      |    27 -
 .../processors/hadoop/deps/WithCast.java        |    41 -
 .../hadoop/deps/WithClassAnnotation.java        |    28 -
 .../hadoop/deps/WithConstructorInvocation.java  |    31 -
 .../processors/hadoop/deps/WithExtends.java     |    27 -
 .../processors/hadoop/deps/WithField.java       |    29 -
 .../processors/hadoop/deps/WithImplements.java  |    36 -
 .../hadoop/deps/WithIndirectField.java          |    27 -
 .../processors/hadoop/deps/WithInitializer.java |    33 -
 .../processors/hadoop/deps/WithInnerClass.java  |    31 -
 .../hadoop/deps/WithLocalVariable.java          |    38 -
 .../hadoop/deps/WithMethodAnnotation.java       |    32 -
 .../hadoop/deps/WithMethodArgument.java         |    31 -
 .../hadoop/deps/WithMethodCheckedException.java |    31 -
 .../hadoop/deps/WithMethodInvocation.java       |    31 -
 .../hadoop/deps/WithMethodReturnType.java       |    31 -
 .../hadoop/deps/WithMethodRuntimeException.java |    31 -
 .../processors/hadoop/deps/WithOuterClass.java  |    38 -
 .../hadoop/deps/WithParameterAnnotation.java    |    31 -
 .../processors/hadoop/deps/WithStaticField.java |    29 -
 .../hadoop/deps/WithStaticInitializer.java      |    34 -
 .../processors/hadoop/deps/Without.java         |    25 -
 .../hadoop/examples/HadoopWordCount1.java       |    94 -
 .../hadoop/examples/HadoopWordCount1Map.java    |    79 -
 .../hadoop/examples/HadoopWordCount1Reduce.java |    61 -
 .../hadoop/examples/HadoopWordCount2.java       |   111 -
 .../examples/HadoopWordCount2Combiner.java      |    45 -
 .../hadoop/examples/HadoopWordCount2Mapper.java |    88 -
 .../examples/HadoopWordCount2Reducer.java       |   113 -
 .../collections/HadoopAbstractMapTest.java      |   174 -
 .../HadoopConcurrentHashMultimapSelftest.java   |   278 -
 .../collections/HadoopHashMapSelfTest.java      |   131 -
 .../collections/HadoopSkipListSelfTest.java     |   318 -
 .../streams/HadoopDataStreamSelfTest.java       |   150 -
 .../taskexecutor/HadoopExecutorServiceTest.java |   118 -
 .../HadoopExternalTaskExecutionSelfTest.java    |   232 -
 .../HadoopExternalCommunicationSelfTest.java    |   220 -
 .../testsuites/IgniteHadoopTestSuite.java       |   354 -
 .../IgniteIgfsLinuxAndMacOSTestSuite.java       |    72 -
 .../src/main/js/app/data/pom-dependencies.json  |     1 +
 .../configuration/generator/Pom.service.js      |     4 +-
 pom.xml                                         |     1 +
 539 files changed, 94978 insertions(+), 94391 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/assembly/dependencies-fabric-lgpl.xml
----------------------------------------------------------------------
diff --git a/assembly/dependencies-fabric-lgpl.xml b/assembly/dependencies-fabric-lgpl.xml
index 2b4cf62..0eef736 100644
--- a/assembly/dependencies-fabric-lgpl.xml
+++ b/assembly/dependencies-fabric-lgpl.xml
@@ -126,6 +126,7 @@
                 <exclude>org.apache.ignite:ignite-visor-console_2.10</exclude>
                 <exclude>org.apache.ignite:ignite-visor-plugins</exclude>
                 <exclude>org.apache.ignite:ignite-hadoop</exclude>
+                <exclude>org.apache.ignite:ignite-hadoop-impl</exclude>
                 <exclude>org.apache.ignite:ignite-schema-import</exclude>
                 <exclude>org.apache.ignite:ignite-schema-import-db</exclude>
                 <exclude>org.apache.ignite:ignite-codegen</exclude>

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/assembly/dependencies-fabric.xml
----------------------------------------------------------------------
diff --git a/assembly/dependencies-fabric.xml b/assembly/dependencies-fabric.xml
index ff4075a..44a234e 100644
--- a/assembly/dependencies-fabric.xml
+++ b/assembly/dependencies-fabric.xml
@@ -126,6 +126,7 @@
                 <exclude>org.apache.ignite:ignite-visor-console_2.10</exclude>
                 <exclude>org.apache.ignite:ignite-visor-plugins</exclude>
                 <exclude>org.apache.ignite:ignite-hadoop</exclude>
+                <exclude>org.apache.ignite:ignite-hadoop-impl</exclude>
                 <exclude>org.apache.ignite:ignite-schema-import</exclude>
                 <exclude>org.apache.ignite:ignite-schema-import-db</exclude>
                 <exclude>org.apache.ignite:ignite-codegen</exclude>

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/assembly/dependencies-hadoop.xml
----------------------------------------------------------------------
diff --git a/assembly/dependencies-hadoop.xml b/assembly/dependencies-hadoop.xml
index 38646ba..ef0a3ce 100644
--- a/assembly/dependencies-hadoop.xml
+++ b/assembly/dependencies-hadoop.xml
@@ -113,6 +113,7 @@
         <moduleSet>
             <includes>
                 <include>org.apache.ignite:ignite-hadoop</include>
+                <include>org.apache.ignite:ignite-hadoop-impl</include>
             </includes>
             <sources>
                 <includeModuleDirectory>true</includeModuleDirectory>

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/assembly/libs/README.txt
----------------------------------------------------------------------
diff --git a/assembly/libs/README.txt b/assembly/libs/README.txt
index 38d8dbd..2fa3e7c 100644
--- a/assembly/libs/README.txt
+++ b/assembly/libs/README.txt
@@ -79,7 +79,8 @@ The following modules are available:
 - ignite-log4j (for Log4j logging)
 - ignite-jcl (for Apache Commons logging)
 - ignite-jta (for XA integration)
-- ignite-hadoop (for Apache Hadoop Accelerator)
+- ignite-hadoop (for Apache Hadoop Accelerator interfaces)
+- ignite-hadoop-impl (for Apache Hadoop Accelerator implementation)
 - ignite-rest-http (for HTTP REST messages)
 - ignite-scalar (for Ignite Scala API)
 - ignite-slf4j (for SLF4J logging)

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/core/src/main/java/org/apache/ignite/internal/IgniteComponentType.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteComponentType.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteComponentType.java
index 76e495f..b182bd8 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteComponentType.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteComponentType.java
@@ -38,7 +38,7 @@ public enum IgniteComponentType {
     HADOOP(
         "org.apache.ignite.internal.processors.hadoop.HadoopNoopProcessor",
         "org.apache.ignite.internal.processors.hadoop.HadoopProcessor",
-        "ignite-hadoop"
+        "ignite-hadoop-impl"
     ),
 
     /** IGFS helper component. */

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopNoopProcessor.java
----------------------------------------------------------------------
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopNoopProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopNoopProcessor.java
index 501870a..9f388fe 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopNoopProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopNoopProcessor.java
@@ -79,8 +79,9 @@ public class HadoopNoopProcessor extends HadoopProcessorAdapter {
      * Creates an exception to be uniformly thrown from all the methods.
      */
     private IllegalStateException createException() {
-        return new IllegalStateException("Hadoop module is not loaded (please ensure that ignite-hadoop.jar is in " +
-            "classpath and IgniteConfiguration.peerClassLoadingEnabled is set to false).");
+        return new IllegalStateException("Hadoop module is not loaded (please ensure that ignite-hadoop.jar and " +
+            "ignite-hadoop-impl.jar are in libs and IgniteConfiguration.peerClassLoadingEnabled is set to " +
+            "false).");
     }
 
     /** {@inheritDoc} */

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/README.txt
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/README.txt b/modules/hadoop-impl/README.txt
new file mode 100644
index 0000000..ecd47e0
--- /dev/null
+++ b/modules/hadoop-impl/README.txt
@@ -0,0 +1,33 @@
+Apache Ignite Hadoop Module
+---------------------------
+
+Apache Ignite Hadoop module provides an in-memory MapReduce engine and a driver that allows IGFS
+to be used as a Hadoop file system, both 100% compatible with HDFS and YARN.
+
+To enable the Hadoop module when starting a standalone node, move the 'optional/ignite-hadoop' folder to
+the 'libs' folder before running the 'ignite.{sh|bat}' script. The contents of the module folder will
+then be added to the classpath.
+
+Importing Hadoop Module In Maven Project
+----------------------------------------
+
+If you are using Maven to manage the dependencies of your project, you can add the Hadoop module
+dependency like this (replace '${ignite.version}' with the actual Ignite version you are
+interested in):
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                        http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    ...
+    <dependencies>
+        ...
+        <dependency>
+            <groupId>org.apache.ignite</groupId>
+            <artifactId>ignite-hadoop</artifactId>
+            <version>${ignite.version}</version>
+        </dependency>
+        ...
+    </dependencies>
+    ...
+</project>

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/config/core-site.ignite.xml
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/config/core-site.ignite.xml b/modules/hadoop-impl/config/core-site.ignite.xml
new file mode 100644
index 0000000..8b8e634
--- /dev/null
+++ b/modules/hadoop-impl/config/core-site.ignite.xml
@@ -0,0 +1,90 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<!--
+    This template file contains settings needed to run Apache Hadoop jobs
+    with Apache Ignite's distributed in-memory file system IGFS.
+
+    You can replace the '$HADOOP_HOME/etc/hadoop/core-site.xml' file with this one
+    to work with IGFS nodes running on localhost (these local nodes can still be
+    a part of a distributed cluster). To work with a file system on remote
+    hosts you need to change the host in the file system URI to any host running
+    an IGFS node.
+
+    Note that Ignite jars must be in the Apache Hadoop client classpath for this
+    configuration to work.
+
+    Run script '$IGNITE_HOME/bin/setup-hadoop.{sh|bat}' for Apache Hadoop client setup.
+-->
+
+<configuration>
+    <!--
+        Set default file system to IGFS instance named "igfs" configured in Ignite.
+    -->
+    <property>
+        <name>fs.default.name</name>
+        <value>igfs://igfs@localhost</value>
+    </property>
+
+    <!--
+        Set Hadoop 1.* file system implementation class for IGFS.
+    -->
+    <property>
+        <name>fs.igfs.impl</name>
+        <value>org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem</value>
+    </property>
+
+    <!--
+        Set Hadoop 2.* file system implementation class for IGFS.
+    -->
+    <property>
+        <name>fs.AbstractFileSystem.igfs.impl</name>
+        <value>org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem</value>
+    </property>
+
+    <!--
+        Disallow data node replacement since it does not make sense for IGFS nodes.
+    -->
+    <property>
+        <name>dfs.client.block.write.replace-datanode-on-failure.policy</name>
+        <value>NEVER</value>
+    </property>
+
+    <!--
+        Allow writing the job statistics into IGFS.
+    -->
+    <!--
+    <property>
+        <name>ignite.counters.writer</name>
+        <value>org.apache.ignite.hadoop.fs.IgniteHadoopFileSystemCounterWriter</value>
+    </property>
+    -->
+
+    <!--
+        By default the data is placed into the file /user/<user_name>/<job_id>/performance.
+        You can override this path; the ${USER} macro is replaced with the name of the submitting user.
+    -->
+    <!--
+    <property>
+        <name>ignite.counters.fswriter.directory</name>
+        <value>/user/${USER}</value>
+    </property>
+    -->
+</configuration>
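
The template above is enough for plain Hadoop client code to talk to IGFS with no source changes,
since the default file system now resolves to 'igfs://igfs@localhost'. Below is a minimal sketch
(assuming an IGFS node named 'igfs' is listening on localhost and the Ignite jars are on the
Hadoop client classpath; the class and path names are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class IgfsClientExample {
        public static void main(String[] args) throws Exception {
            // Picks up fs.default.name=igfs://igfs@localhost from core-site.xml on the classpath.
            Configuration conf = new Configuration();

            try (FileSystem fs = FileSystem.get(conf)) {
                Path path = new Path("/tmp/hello.txt");

                // The write goes through the IgniteHadoopFileSystem driver into IGFS.
                try (FSDataOutputStream out = fs.create(path, true)) {
                    out.writeUTF("Hello, IGFS!");
                }

                System.out.println("File exists: " + fs.exists(path));
            }
        }
    }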

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/config/hive-site.ignite.xml
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/config/hive-site.ignite.xml b/modules/hadoop-impl/config/hive-site.ignite.xml
new file mode 100644
index 0000000..f278aab
--- /dev/null
+++ b/modules/hadoop-impl/config/hive-site.ignite.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<!--
+    This template file contains settings needed to run Apache Hive queries
+    with Ignite In-Memory Accelerator.
+
+    You can replace '$HIVE_HOME/conf/hive-site.xml' file with this one or
+    run script '$IGNITE_HOME/bin/setup-hadoop.{sh|bat}' for Apache Hadoop
+    and Hive client setup.
+-->
+<configuration>
+    <!--
+        Ignite requires the query plan to be passed over RPC rather than as a local resource.
+    -->
+    <property>
+        <name>hive.rpc.query.plan</name>
+        <value>true</value>
+    </property>
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/config/mapred-site.ignite.xml
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/config/mapred-site.ignite.xml b/modules/hadoop-impl/config/mapred-site.ignite.xml
new file mode 100644
index 0000000..a2ed437
--- /dev/null
+++ b/modules/hadoop-impl/config/mapred-site.ignite.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<!--
+    This template file contains settings needed to run Apache Hadoop jobs
+    with Apache Ignite In-Memory Accelerator.
+
+    You can replace the '$HADOOP_HOME/etc/hadoop/mapred-site.xml' file with this one
+    to run jobs on localhost (the local node can still be a part of a distributed cluster).
+    To run jobs on a remote host you have to change the job tracker address to the REST address
+    of any running Ignite node.
+
+    Note that Ignite jars must be in the Apache Hadoop client classpath for this
+    configuration to work.
+
+    Run script '$IGNITE_HOME/bin/setup-hadoop.{sh|bat}' for Apache Hadoop client setup.
+-->
+
+<configuration>
+    <!--
+        Framework name must be set to 'ignite'.
+    -->
+    <property>
+        <name>mapreduce.framework.name</name>
+        <value>ignite</value>
+    </property>
+
+    <!--
+        Job tracker address must be set to the REST address of any running Ignite node.
+    -->
+    <property>
+        <name>mapreduce.jobtracker.address</name>
+        <value>localhost:11211</value>
+    </property>
+
+    <!-- Parameters for job tuning. -->
+    <!--
+    <property>
+        <name>mapreduce.job.reduces</name>
+        <value>1</value>
+    </property>
+
+    <property>
+        <name>mapreduce.job.maps</name>
+        <value>4</value>
+    </property>
+    -->
+
+</configuration>
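
With 'mapreduce.framework.name' set to 'ignite' and the job tracker address pointing at an Ignite
node's REST endpoint, unmodified MapReduce client code is routed to the in-memory engine. A minimal
submission sketch (assuming the template above is on the client classpath; the identity
Mapper/Reducer and the input/output paths are illustrative placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hadoop.mapreduce.Reducer;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

    public class IgniteMapReduceExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();

            // These two properties mirror the template above; normally they come from mapred-site.xml.
            conf.set("mapreduce.framework.name", "ignite");
            conf.set("mapreduce.jobtracker.address", "localhost:11211");

            Job job = Job.getInstance(conf, "identity-job");

            job.setJarByClass(IgniteMapReduceExample.class);
            job.setMapperClass(Mapper.class);    // Identity mapper; substitute your own.
            job.setReducerClass(Reducer.class);  // Identity reducer; substitute your own.
            job.setOutputKeyClass(LongWritable.class);
            job.setOutputValueClass(Text.class);

            FileInputFormat.addInputPath(job, new Path("/input"));
            FileOutputFormat.setOutputPath(job, new Path("/output"));

            System.exit(job.waitForCompletion(true) ? 0 : 1);
        }
    }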

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/licenses/apache-2.0.txt
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/licenses/apache-2.0.txt b/modules/hadoop-impl/licenses/apache-2.0.txt
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/modules/hadoop-impl/licenses/apache-2.0.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/pom.xml
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/pom.xml b/modules/hadoop-impl/pom.xml
new file mode 100644
index 0000000..b82a913
--- /dev/null
+++ b/modules/hadoop-impl/pom.xml
@@ -0,0 +1,151 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<!--
+    POM file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.ignite</groupId>
+        <artifactId>ignite-parent</artifactId>
+        <version>1</version>
+        <relativePath>../../parent</relativePath>
+    </parent>
+
+    <artifactId>ignite-hadoop-impl</artifactId>
+    <version>1.7.0-SNAPSHOT</version>
+    <url>http://ignite.apache.org</url>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.ignite</groupId>
+            <artifactId>ignite-core</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.ignite</groupId>
+            <artifactId>ignite-hadoop</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.ignite</groupId>
+            <artifactId>ignite-log4j</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-annotations</artifactId>
+            <version>${hadoop.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-auth</artifactId>
+            <version>${hadoop.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-common</artifactId>
+            <version>${hadoop.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-hdfs</artifactId>
+            <version>${hadoop.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-mapreduce-client-common</artifactId>
+            <version>${hadoop.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-mapreduce-client-core</artifactId>
+            <version>${hadoop.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>log4j</groupId>
+            <artifactId>log4j</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.gridgain</groupId>
+            <artifactId>ignite-shmem</artifactId>
+            <scope>test</scope>
+            <version>1.0.0</version>
+        </dependency>
+
+        <dependency>
+            <groupId>commons-beanutils</groupId>
+            <artifactId>commons-beanutils</artifactId>
+            <version>${commons.beanutils.version}</version>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.ignite</groupId>
+            <artifactId>ignite-spring</artifactId>
+            <version>${project.version}</version>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.ignite</groupId>
+            <artifactId>ignite-core</artifactId>
+            <version>${project.version}</version>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-jar-plugin</artifactId>
+                <version>2.2</version>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>test-jar</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-surefire-plugin</artifactId>
+                <version>2.17</version>
+                <configuration>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+</project>

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/BasicHadoopFileSystemFactory.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/BasicHadoopFileSystemFactory.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/BasicHadoopFileSystemFactory.java
new file mode 100644
index 0000000..a01bfaf
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/BasicHadoopFileSystemFactory.java
@@ -0,0 +1,275 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.fs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
+import org.apache.ignite.hadoop.util.KerberosUserNameMapper;
+import org.apache.ignite.hadoop.util.UserNameMapper;
+import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
+import org.apache.ignite.internal.processors.igfs.IgfsUtils;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lifecycle.LifecycleAware;
+import org.jetbrains.annotations.Nullable;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.Arrays;
+
+/**
+ * Simple Hadoop file system factory which delegates to {@code FileSystem.get()} on each call.
+ * <p>
+ * If {@code "fs.[prefix].impl.disable.cache"} is set to {@code true}, file system instances will be cached by Hadoop.
+ */
+public class BasicHadoopFileSystemFactory implements HadoopFileSystemFactory, Externalizable, LifecycleAware {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** File system URI. */
+    private String uri;
+
+    /** File system config paths. */
+    private String[] cfgPaths;
+
+    /** User name mapper. */
+    private UserNameMapper usrNameMapper;
+
+    /** Configuration of the secondary filesystem, never null. */
+    protected transient Configuration cfg;
+
+    /** Resulting URI. */
+    protected transient URI fullUri;
+
+    /**
+     * Constructor.
+     */
+    public BasicHadoopFileSystemFactory() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public final FileSystem get(String name) throws IOException {
+        String name0 = IgfsUtils.fixUserName(name);
+
+        if (usrNameMapper != null)
+            name0 = IgfsUtils.fixUserName(usrNameMapper.map(name0));
+
+        return getWithMappedName(name0);
+    }
+
+    /**
+     * Internal file system create routine.
+     *
+     * @param usrName User name.
+     * @return File system.
+     * @throws IOException If failed.
+     */
+    protected FileSystem getWithMappedName(String usrName) throws IOException {
+        assert cfg != null;
+
+        try {
+            // FileSystem.get() might delegate to ServiceLoader to get the list of file system implementations.
+            // And ServiceLoader is known to be sensitive to the context classloader. Therefore, we change the
+            // context classloader to the classloader of the current class to avoid unexpected class cast exceptions.
+            ClassLoader oldLdr = HadoopUtils.setContextClassLoader(getClass().getClassLoader());
+
+            try {
+                return create(usrName);
+            }
+            finally {
+                HadoopUtils.restoreContextClassLoader(oldLdr);
+            }
+        }
+        catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+
+            throw new IOException("Failed to create file system due to interrupt.", e);
+        }
+    }
+
+    /**
+     * Internal file system creation routine, invoked in correct class loader context.
+     *
+     * @param usrName User name.
+     * @return File system.
+     * @throws IOException If failed.
+     * @throws InterruptedException if the current thread is interrupted.
+     */
+    protected FileSystem create(String usrName) throws IOException, InterruptedException {
+        return FileSystem.get(fullUri, cfg, usrName);
+    }
+
+    /**
+     * Gets file system URI.
+     * <p>
+     * This URI will be used as a first argument when calling {@link FileSystem#get(URI, Configuration, String)}.
+     * <p>
+     * If not set, default URI will be picked from file system configuration using
+     * {@link FileSystem#getDefaultUri(Configuration)} method.
+     *
+     * @return File system URI.
+     */
+    @Nullable public String getUri() {
+        return uri;
+    }
+
+    /**
+     * Sets file system URI. See {@link #getUri()} for more information.
+     *
+     * @param uri File system URI.
+     */
+    public void setUri(@Nullable String uri) {
+        this.uri = uri;
+    }
+
+    /**
+     * Gets paths to additional file system configuration files (e.g. core-site.xml).
+     * <p>
+     * Each path can be either absolute or relative to the {@code IGNITE_HOME} environment variable.
+     * <p>
+     * All provided paths will be loaded in the order they are provided and then applied to {@link Configuration}.
+     * This means that path order might be important in some cases.
+     * <p>
+     * <b>NOTE!</b> Factory can be serialized and transferred to other machines where instance of
+     * {@link IgniteHadoopFileSystem} resides. Corresponding paths must exist on these machines as well.
+     *
+     * @return Paths to file system configuration files.
+     */
+    @Nullable public String[] getConfigPaths() {
+        return cfgPaths;
+    }
+
+    /**
+     * Set paths to additional file system configuration files (e.g. core-site.xml). See {@link #getConfigPaths()} for
+     * more information.
+     *
+     * @param cfgPaths Paths to file system configuration files.
+     */
+    public void setConfigPaths(@Nullable String... cfgPaths) {
+        this.cfgPaths = cfgPaths;
+    }
+
+    /**
+     * Get optional user name mapper.
+     * <p>
+     * When IGFS is invoked from Hadoop, the user name is passed along the way to ensure that the request is
+     * performed with the proper user context. The user name is passed in a simple form and doesn't contain any
+     * extended information, such as host, domain or Kerberos realm. You may use a name mapper to translate the plain
+     * user name to the full user name required by the security engine of the underlying file system.
+     * <p>
+     * For example, you may want to use {@link KerberosUserNameMapper} to map the user name {@code "johndoe"} to
+     * {@code "johndoe@YOUR.REALM.COM"}.
+     *
+     * @return User name mapper.
+     */
+    @Nullable public UserNameMapper getUserNameMapper() {
+        return usrNameMapper;
+    }
+
+    /**
+     * Set optional user name mapper. See {@link #getUserNameMapper()} for more information.
+     *
+     * @param usrNameMapper User name mapper.
+     */
+    public void setUserNameMapper(@Nullable UserNameMapper usrNameMapper) {
+        this.usrNameMapper = usrNameMapper;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void start() throws IgniteException {
+        cfg = HadoopUtils.safeCreateConfiguration();
+
+        if (cfgPaths != null) {
+            for (String cfgPath : cfgPaths) {
+                if (cfgPath == null)
+                    throw new NullPointerException("Configuration path cannot be null: " + Arrays.toString(cfgPaths));
+                else {
+                    URL url = U.resolveIgniteUrl(cfgPath);
+
+                    if (url == null) {
+                        // If a configuration path is given, it should be resolvable:
+                        throw new IgniteException("Failed to resolve secondary file system configuration path " +
+                            "(ensure that it exists locally and you have read access to it): " + cfgPath);
+                    }
+
+                    cfg.addResource(url);
+                }
+            }
+        }
+
+        // If secondary fs URI is not given explicitly, try to get it from the configuration:
+        if (uri == null)
+            fullUri = FileSystem.getDefaultUri(cfg);
+        else {
+            try {
+                fullUri = new URI(uri);
+            }
+            catch (URISyntaxException use) {
+                throw new IgniteException("Failed to resolve secondary file system URI: " + uri);
+            }
+        }
+
+        if (usrNameMapper != null && usrNameMapper instanceof LifecycleAware)
+            ((LifecycleAware)usrNameMapper).start();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void stop() throws IgniteException {
+        if (usrNameMapper != null && usrNameMapper instanceof LifecycleAware)
+            ((LifecycleAware)usrNameMapper).stop();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeExternal(ObjectOutput out) throws IOException {
+        U.writeString(out, uri);
+
+        if (cfgPaths != null) {
+            out.writeInt(cfgPaths.length);
+
+            for (String cfgPath : cfgPaths)
+                U.writeString(out, cfgPath);
+        }
+        else
+            out.writeInt(-1);
+
+        out.writeObject(usrNameMapper);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        uri = U.readString(in);
+
+        int cfgPathsCnt = in.readInt();
+
+        if (cfgPathsCnt != -1) {
+            cfgPaths = new String[cfgPathsCnt];
+
+            for (int i = 0; i < cfgPathsCnt; i++)
+                cfgPaths[i] = U.readString(in);
+        }
+
+        usrNameMapper = (UserNameMapper)in.readObject();
+    }
+}
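
As a usage sketch, the factory is normally configured declaratively in Spring XML, but programmatic
wiring looks roughly like this (the HDFS endpoint and configuration path below are assumptions for
illustration; lifecycle callbacks are invoked directly here, while Ignite performs them itself when
the factory is part of a node configuration):

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.ignite.hadoop.fs.BasicHadoopFileSystemFactory;

    public class FactoryExample {
        public static void main(String[] args) throws Exception {
            BasicHadoopFileSystemFactory factory = new BasicHadoopFileSystemFactory();

            factory.setUri("hdfs://hdfs-host:9000");                  // Assumed HDFS endpoint.
            factory.setConfigPaths("/etc/hadoop/conf/core-site.xml"); // Assumed config location.

            factory.start();

            try {
                // Delegates to FileSystem.get() with the mapped user name.
                FileSystem fs = factory.get("johndoe");

                System.out.println("Working directory: " + fs.getWorkingDirectory());
            }
            finally {
                factory.stop();
            }
        }
    }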

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/CachingHadoopFileSystemFactory.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/CachingHadoopFileSystemFactory.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/CachingHadoopFileSystemFactory.java
new file mode 100644
index 0000000..bcbb082
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/CachingHadoopFileSystemFactory.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.fs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemsUtils;
+import org.apache.ignite.internal.processors.hadoop.fs.HadoopLazyConcurrentMap;
+
+import java.io.IOException;
+import java.net.URI;
+
+/**
+ * Caching Hadoop file system factory. Caches {@link FileSystem} instances on a per-user basis. Doesn't rely on
+ * the built-in Hadoop {@code FileSystem} caching mechanics; a separate {@code FileSystem} instance is created for
+ * each user instead.
+ * <p>
+ * This makes the cached instances resistant to concurrent calls to {@link FileSystem#close()} in other parts of the
+ * user code. On the other hand, this might cause problems in some environments. E.g. if Kerberos is enabled, a call
+ * to {@link FileSystem#get(URI, Configuration, String)} will refresh the Kerberos token. But this factory
+ * implementation calls this method only once per user, which may lead to token expiration. In such cases it makes
+ * sense to either use {@link BasicHadoopFileSystemFactory} or implement your own factory.
+ */
+public class CachingHadoopFileSystemFactory extends BasicHadoopFileSystemFactory {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** Per-user file system cache. */
+    private final transient HadoopLazyConcurrentMap<String, FileSystem> cache = new HadoopLazyConcurrentMap<>(
+        new HadoopLazyConcurrentMap.ValueFactory<String, FileSystem>() {
+            @Override public FileSystem createValue(String key) throws IOException {
+                return CachingHadoopFileSystemFactory.super.getWithMappedName(key);
+            }
+        }
+    );
+
+    /**
+     * Public non-arg constructor.
+     */
+    public CachingHadoopFileSystemFactory() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public FileSystem getWithMappedName(String name) throws IOException {
+        return cache.getOrCreate(name);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void start() throws IgniteException {
+        super.start();
+
+        // Disable caching.
+        cfg.setBoolean(HadoopFileSystemsUtils.disableFsCachePropertyName(fullUri.getScheme()), true);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void stop() throws IgniteException {
+        super.stop();
+
+        try {
+            cache.close();
+        }
+        catch (IgniteCheckedException ice) {
+            throw new IgniteException(ice);
+        }
+    }
+}
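
A short sketch of the caching behavior described above: repeated get() calls for the same user
return the same cached instance, while different users get isolated instances (the HDFS endpoint
is an assumption for illustration):

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.ignite.hadoop.fs.CachingHadoopFileSystemFactory;

    public class CachingFactoryExample {
        public static void main(String[] args) throws Exception {
            CachingHadoopFileSystemFactory factory = new CachingHadoopFileSystemFactory();

            factory.setUri("hdfs://hdfs-host:9000"); // Assumed HDFS endpoint.
            factory.start();

            try {
                FileSystem fs1 = factory.get("johndoe");
                FileSystem fs2 = factory.get("johndoe");
                FileSystem fs3 = factory.get("janedoe");

                System.out.println("Same instance per user: " + (fs1 == fs2)); // true
                System.out.println("Distinct across users:  " + (fs1 != fs3)); // true
            }
            finally {
                factory.stop();
            }
        }
    }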

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/HadoopFileSystemFactory.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/HadoopFileSystemFactory.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/HadoopFileSystemFactory.java
new file mode 100644
index 0000000..5ad08ab
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/HadoopFileSystemFactory.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.fs;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
+import org.apache.ignite.igfs.IgfsMode;
+import org.apache.ignite.lifecycle.LifecycleAware;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+/**
+ * Factory for Hadoop {@link FileSystem} used by {@link IgniteHadoopIgfsSecondaryFileSystem}.
+ * <p>
+ * The {@link #get(String)} method will be used whenever a call to a target {@code FileSystem} is required.
+ * <p>
+ * It is implementation-dependent whether to rely on the built-in Hadoop file system cache, implement a custom
+ * caching facility, or not cache file systems at all.
+ * <p>
+ * Concrete factory may implement {@link LifecycleAware} interface. In this case start and stop callbacks will be
+ * performed by Ignite. You may want to implement some initialization or cleanup there.
+ * <p>
+ * Note that factory extends {@link Serializable} interface as it might be necessary to transfer factories over the
+ * wire to {@link IgniteHadoopFileSystem} if {@link IgfsMode#PROXY} is enabled for some file
+ * system paths.
+ */
+public interface HadoopFileSystemFactory extends Serializable {
+    /**
+     * Gets file system for the given user name.
+     *
+     * @param usrName User name.
+     * @return File system.
+     * @throws IOException In case of error.
+     */
+    public FileSystem get(String usrName) throws IOException;
+}
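
Since the contract is a single method plus serializability, a custom factory is easy to sketch.
The example below always returns the local file system regardless of user; it is illustrative only,
as a real factory would normally honor the user name for security purposes:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.ignite.hadoop.fs.HadoopFileSystemFactory;

    public class LocalFileSystemFactory implements HadoopFileSystemFactory {
        private static final long serialVersionUID = 0L;

        /** {@inheritDoc} */
        @Override public FileSystem get(String usrName) throws IOException {
            // Ignores the user name and returns the local file system.
            return FileSystem.getLocal(new Configuration());
        }
    }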

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopFileSystemCounterWriter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopFileSystemCounterWriter.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopFileSystemCounterWriter.java
new file mode 100644
index 0000000..8085826
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopFileSystemCounterWriter.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.hadoop.fs;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Map;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopDefaultJobInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopJob;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounterWriter;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopPerformanceCounter;
+import org.apache.ignite.internal.processors.hadoop.v2.HadoopV2Job;
+import org.apache.ignite.internal.processors.igfs.IgfsUtils;
+import org.apache.ignite.internal.util.typedef.T2;
+
+/**
+ * Statistics writer implementation that writes job performance counters into any Hadoop file system.
+ */
+public class IgniteHadoopFileSystemCounterWriter implements HadoopCounterWriter {
+    /** */
+    public static final String PERFORMANCE_COUNTER_FILE_NAME = "performance";
+
+    /** */
+    public static final String COUNTER_WRITER_DIR_PROPERTY = "ignite.counters.fswriter.directory";
+
+    /** */
+    private static final String USER_MACRO = "${USER}";
+
+    /** */
+    private static final String DEFAULT_COUNTER_WRITER_DIR = "/user/" + USER_MACRO;
+
+    /** {@inheritDoc} */
+    @Override public void write(HadoopJob job, HadoopCounters cntrs)
+        throws IgniteCheckedException {
+
+        Configuration hadoopCfg = HadoopUtils.safeCreateConfiguration();
+
+        final HadoopJobInfo jobInfo = job.info();
+
+        final HadoopJobId jobId = job.id();
+
+        for (Map.Entry<String, String> e : ((HadoopDefaultJobInfo)jobInfo).properties().entrySet())
+            hadoopCfg.set(e.getKey(), e.getValue());
+
+        String user = jobInfo.user();
+
+        user = IgfsUtils.fixUserName(user);
+
+        String dir = jobInfo.property(COUNTER_WRITER_DIR_PROPERTY);
+
+        if (dir == null)
+            dir = DEFAULT_COUNTER_WRITER_DIR;
+
+        Path jobStatPath = new Path(new Path(dir.replace(USER_MACRO, user)), jobId.toString());
+
+        HadoopPerformanceCounter perfCntr = HadoopPerformanceCounter.getCounter(cntrs, null);
+
+        try {
+            hadoopCfg.set(MRJobConfig.USER_NAME, user);
+
+            FileSystem fs = ((HadoopV2Job)job).fileSystem(jobStatPath.toUri(), hadoopCfg);
+
+            fs.mkdirs(jobStatPath);
+
+            try (PrintStream out = new PrintStream(fs.create(new Path(jobStatPath, PERFORMANCE_COUNTER_FILE_NAME)))) {
+                for (T2<String, Long> evt : perfCntr.evts()) {
+                    out.print(evt.get1());
+                    out.print(':');
+                    out.println(evt.get2().toString());
+                }
+
+                out.flush();
+            }
+        }
+        catch (IOException e) {
+            throw new IgniteCheckedException(e);
+        }
+    }
+}
\ No newline at end of file
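
To route statistics through this writer, the two properties shown commented out in
core-site.ignite.xml above are set in the job configuration. A sketch of enabling it
programmatically (the property values are taken from the template; the helper class name is
illustrative):

    import org.apache.hadoop.conf.Configuration;

    public class CounterWriterConfigExample {
        public static Configuration withCounterWriter() {
            Configuration conf = new Configuration();

            // Write per-job performance statistics via IgniteHadoopFileSystemCounterWriter.
            conf.set("ignite.counters.writer",
                "org.apache.ignite.hadoop.fs.IgniteHadoopFileSystemCounterWriter");

            // Optional: override the default '/user/${USER}' target directory.
            conf.set("ignite.counters.fswriter.directory", "/user/${USER}");

            return conf;
        }
    }

The resulting file lands at <directory>/<job_id>/performance, one 'event:timestamp' pair per
line, as implemented in write() above.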


[43/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolSubmitJobTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolSubmitJobTask.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolSubmitJobTask.java
new file mode 100644
index 0000000..3eb819b
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolSubmitJobTask.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.proto;
+
+import java.util.UUID;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.compute.ComputeJobContext;
+import org.apache.ignite.internal.processors.hadoop.Hadoop;
+import org.apache.ignite.internal.processors.hadoop.HadoopDefaultJobInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobStatus;
+
+import static org.apache.ignite.internal.processors.hadoop.HadoopJobPhase.PHASE_CANCELLING;
+
+/**
+ * Submit job task.
+ */
+public class HadoopProtocolSubmitJobTask extends HadoopProtocolTaskAdapter<HadoopJobStatus> {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** {@inheritDoc} */
+    @Override public HadoopJobStatus run(ComputeJobContext jobCtx, Hadoop hadoop,
+        HadoopProtocolTaskArguments args) throws IgniteCheckedException {
+        UUID nodeId = UUID.fromString(args.<String>get(0));
+        Integer id = args.get(1);
+        HadoopDefaultJobInfo info = args.get(2);
+
+        assert nodeId != null;
+        assert id != null;
+        assert info != null;
+
+        HadoopJobId jobId = new HadoopJobId(nodeId, id);
+
+        hadoop.submit(jobId, info);
+
+        HadoopJobStatus res = hadoop.status(jobId);
+
+        if (res == null) // Submission failed.
+            res = new HadoopJobStatus(jobId, info.jobName(), info.user(), 0, 0, 0, 0, PHASE_CANCELLING, true, 1);
+
+        return res;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolTaskAdapter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolTaskAdapter.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolTaskAdapter.java
new file mode 100644
index 0000000..c3227ae
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolTaskAdapter.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.proto;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.compute.ComputeJob;
+import org.apache.ignite.compute.ComputeJobContext;
+import org.apache.ignite.compute.ComputeJobResult;
+import org.apache.ignite.compute.ComputeJobResultPolicy;
+import org.apache.ignite.compute.ComputeTask;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.processors.hadoop.Hadoop;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.resources.IgniteInstanceResource;
+import org.apache.ignite.resources.JobContextResource;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Hadoop protocol task adapter.
+ */
+public abstract class HadoopProtocolTaskAdapter<R> implements ComputeTask<HadoopProtocolTaskArguments, R> {
+    /** {@inheritDoc} */
+    @Nullable @Override public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid,
+        @Nullable HadoopProtocolTaskArguments arg) {
+        return Collections.singletonMap(new Job(arg), subgrid.get(0));
+    }
+
+    /** {@inheritDoc} */
+    @Override public ComputeJobResultPolicy result(ComputeJobResult res, List<ComputeJobResult> rcvd) {
+        return ComputeJobResultPolicy.REDUCE;
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public R reduce(List<ComputeJobResult> results) {
+        if (!F.isEmpty(results)) {
+            ComputeJobResult res = results.get(0);
+
+            return res.getData();
+        }
+        else
+            return null;
+    }
+
+    /**
+     * Job wrapper.
+     */
+    private class Job implements ComputeJob {
+        /** */
+        private static final long serialVersionUID = 0L;
+
+        /** */
+        @IgniteInstanceResource
+        private Ignite ignite;
+
+        /** */
+        @SuppressWarnings("UnusedDeclaration")
+        @JobContextResource
+        private ComputeJobContext jobCtx;
+
+        /** Argument. */
+        private final HadoopProtocolTaskArguments args;
+
+        /**
+         * Constructor.
+         *
+         * @param args Task arguments.
+         */
+        private Job(HadoopProtocolTaskArguments args) {
+            this.args = args;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void cancel() {
+            // No-op.
+        }
+
+        /** {@inheritDoc} */
+        @Nullable @Override public Object execute() {
+            try {
+                return run(jobCtx, ((IgniteEx)ignite).hadoop(), args);
+            }
+            catch (IgniteCheckedException e) {
+                throw U.convertException(e);
+            }
+        }
+    }
+
+    /**
+     * Run the task.
+     *
+     * @param jobCtx Job context.
+     * @param hadoop Hadoop facade.
+     * @param args Arguments.
+     * @return Job result.
+     * @throws IgniteCheckedException If failed.
+     */
+    public abstract R run(ComputeJobContext jobCtx, Hadoop hadoop, HadoopProtocolTaskArguments args)
+        throws IgniteCheckedException;
+}
\ No newline at end of file
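
HadoopProtocolSubmitJobTask above is one concrete use of this adapter: map(...) pins
the wrapped job to the first node of the subgrid and reduce(...) returns the first
result, so a subclass only implements run(...). A hypothetical minimal subclass,
assuming it lives in the same ...hadoop.proto package (illustrative, not part of
this commit):

    import org.apache.ignite.IgniteCheckedException;
    import org.apache.ignite.compute.ComputeJobContext;
    import org.apache.ignite.internal.processors.hadoop.Hadoop;

    /** Hypothetical task: echoes its single string argument back to the caller. */
    public class HadoopProtocolEchoTask extends HadoopProtocolTaskAdapter<String> {
        /** */
        private static final long serialVersionUID = 0L;

        /** {@inheritDoc} */
        @Override public String run(ComputeJobContext jobCtx, Hadoop hadoop,
            HadoopProtocolTaskArguments args) throws IgniteCheckedException {
            // The adapter maps this call to subgrid.get(0) and reduces to the first result.
            return args.<String>get(0);
        }
    }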

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolTaskArguments.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolTaskArguments.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolTaskArguments.java
new file mode 100644
index 0000000..e497454
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolTaskArguments.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.proto;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Task arguments.
+ */
+public class HadoopProtocolTaskArguments implements Externalizable {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** Arguments. */
+    private Object[] args;
+
+    /**
+     * {@link Externalizable} support.
+     */
+    public HadoopProtocolTaskArguments() {
+        // No-op.
+    }
+
+    /**
+     * Constructor.
+     *
+     * @param args Arguments.
+     */
+    public HadoopProtocolTaskArguments(Object... args) {
+        this.args = args;
+    }
+
+    /**
+     * @param idx Argument index.
+     * @return Argument.
+     */
+    @SuppressWarnings("unchecked")
+    @Nullable public <T> T get(int idx) {
+        return (args != null && args.length > idx) ? (T)args[idx] : null;
+    }
+
+    /**
+     * @return Size.
+     */
+    public int size() {
+        return args != null ? args.length : 0;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeExternal(ObjectOutput out) throws IOException {
+        U.writeArray(out, args);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        args = U.readArray(in);
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopProtocolTaskArguments.class, this);
+    }
+}
\ No newline at end of file
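
The container is positional and untyped: producer and consumer must agree on argument
indices and types, as HadoopProtocolSubmitJobTask above does with a node ID string at
index 0, a job counter at index 1 and a job info at index 2. A short sketch of that
contract (values are illustrative):

    HadoopProtocolTaskArguments args = new HadoopProtocolTaskArguments("node-id", 42);

    String nodeId = args.get(0);    // "node-id"; the cast is unchecked, so types must match.
    Integer cnt = args.get(1);      // 42; the target type is inferred at the call site.
    Object missing = args.get(100); // An out-of-range index returns null rather than throwing.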

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffle.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffle.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffle.java
new file mode 100644
index 0000000..769bdc4
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffle.java
@@ -0,0 +1,263 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.shuffle;
+
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.internal.GridTopic;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.hadoop.HadoopComponent;
+import org.apache.ignite.internal.processors.hadoop.HadoopContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.HadoopMapReducePlan;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskOutput;
+import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
+import org.apache.ignite.internal.util.future.GridFinishedFuture;
+import org.apache.ignite.internal.util.lang.IgniteInClosure2X;
+import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiPredicate;
+
+/**
+ * Shuffle.
+ */
+public class HadoopShuffle extends HadoopComponent {
+    /** */
+    private final ConcurrentMap<HadoopJobId, HadoopShuffleJob<UUID>> jobs = new ConcurrentHashMap<>();
+
+    /** */
+    protected final GridUnsafeMemory mem = new GridUnsafeMemory(0);
+
+    /** {@inheritDoc} */
+    @Override public void start(HadoopContext ctx) throws IgniteCheckedException {
+        super.start(ctx);
+
+        ctx.kernalContext().io().addUserMessageListener(GridTopic.TOPIC_HADOOP,
+            new IgniteBiPredicate<UUID, Object>() {
+                @Override public boolean apply(UUID nodeId, Object msg) {
+                    return onMessageReceived(nodeId, (HadoopMessage)msg);
+                }
+            });
+    }
+
+    /**
+     * Stops shuffle.
+     *
+     * @param cancel Whether to cancel all ongoing activities.
+     */
+    @Override public void stop(boolean cancel) {
+        for (HadoopShuffleJob job : jobs.values()) {
+            try {
+                job.close();
+            }
+            catch (IgniteCheckedException e) {
+                U.error(log, "Failed to close job.", e);
+            }
+        }
+
+        jobs.clear();
+    }
+
+    /**
+     * Creates new shuffle job.
+     *
+     * @param jobId Job ID.
+     * @return Created shuffle job.
+     * @throws IgniteCheckedException If job creation failed.
+     */
+    private HadoopShuffleJob<UUID> newJob(HadoopJobId jobId) throws IgniteCheckedException {
+        HadoopMapReducePlan plan = ctx.jobTracker().plan(jobId);
+
+        HadoopShuffleJob<UUID> job = new HadoopShuffleJob<>(ctx.localNodeId(), log,
+            ctx.jobTracker().job(jobId, null), mem, plan.reducers(), plan.reducers(ctx.localNodeId()));
+
+        UUID[] rdcAddrs = new UUID[plan.reducers()];
+
+        for (int i = 0; i < rdcAddrs.length; i++) {
+            UUID nodeId = plan.nodeForReducer(i);
+
+            assert nodeId != null : "Plan is missing node for reducer [plan=" + plan + ", rdc=" + i + ']';
+
+            rdcAddrs[i] = nodeId;
+        }
+
+        boolean init = job.initializeReduceAddresses(rdcAddrs);
+
+        assert init;
+
+        return job;
+    }
+
+    /**
+     * @param nodeId Node ID to send message to.
+     * @param msg Message to send.
+     * @throws IgniteCheckedException If send failed.
+     */
+    private void send0(UUID nodeId, Object msg) throws IgniteCheckedException {
+        ClusterNode node = ctx.kernalContext().discovery().node(nodeId);
+
+        ctx.kernalContext().io().sendUserMessage(F.asList(node), msg, GridTopic.TOPIC_HADOOP, false, 0);
+    }
+
+    /**
+     * @param jobId Job ID.
+     * @return Shuffle job.
+     * @throws IgniteCheckedException If the shuffle job could not be created.
+     */
+    private HadoopShuffleJob<UUID> job(HadoopJobId jobId) throws IgniteCheckedException {
+        HadoopShuffleJob<UUID> res = jobs.get(jobId);
+
+        if (res == null) {
+            res = newJob(jobId);
+
+            HadoopShuffleJob<UUID> old = jobs.putIfAbsent(jobId, res);
+
+            if (old != null) {
+                res.close();
+
+                res = old;
+            }
+            else if (res.reducersInitialized())
+                startSending(res);
+        }
+
+        return res;
+    }
+
+    /**
+     * Starts message sending thread.
+     *
+     * @param shuffleJob Job to start sending messages for.
+     */
+    private void startSending(HadoopShuffleJob<UUID> shuffleJob) {
+        shuffleJob.startSending(ctx.kernalContext().gridName(),
+            new IgniteInClosure2X<UUID, HadoopShuffleMessage>() {
+                @Override public void applyx(UUID dest, HadoopShuffleMessage msg) throws IgniteCheckedException {
+                    send0(dest, msg);
+                }
+            }
+        );
+    }
+
+    /**
+     * Message received callback.
+     *
+     * @param src Sender node ID.
+     * @param msg Received message.
+     * @return Always {@code true}.
+     */
+    public boolean onMessageReceived(UUID src, HadoopMessage msg) {
+        if (msg instanceof HadoopShuffleMessage) {
+            HadoopShuffleMessage m = (HadoopShuffleMessage)msg;
+
+            try {
+                job(m.jobId()).onShuffleMessage(m);
+            }
+            catch (IgniteCheckedException e) {
+                U.error(log, "Message handling failed.", e);
+            }
+
+            try {
+                // Reply with ack.
+                send0(src, new HadoopShuffleAck(m.id(), m.jobId()));
+            }
+            catch (IgniteCheckedException e) {
+                U.error(log, "Failed to reply back to shuffle message sender [snd=" + src + ", msg=" + msg + ']', e);
+            }
+        }
+        else if (msg instanceof HadoopShuffleAck) {
+            HadoopShuffleAck m = (HadoopShuffleAck)msg;
+
+            try {
+                job(m.jobId()).onShuffleAck(m);
+            }
+            catch (IgniteCheckedException e) {
+                U.error(log, "Message handling failed.", e);
+            }
+        }
+        else
+            throw new IllegalStateException("Unknown message type received to Hadoop shuffle [src=" + src +
+                ", msg=" + msg + ']');
+
+        return true;
+    }
+
+    /**
+     * @param taskCtx Task context.
+     * @return Output.
+     */
+    public HadoopTaskOutput output(HadoopTaskContext taskCtx) throws IgniteCheckedException {
+        return job(taskCtx.taskInfo().jobId()).output(taskCtx);
+    }
+
+    /**
+     * @param taskCtx Task context.
+     * @return Input.
+     */
+    public HadoopTaskInput input(HadoopTaskContext taskCtx) throws IgniteCheckedException {
+        return job(taskCtx.taskInfo().jobId()).input(taskCtx);
+    }
+
+    /**
+     * @param jobId Job ID.
+     */
+    public void jobFinished(HadoopJobId jobId) {
+        HadoopShuffleJob job = jobs.remove(jobId);
+
+        if (job != null) {
+            try {
+                job.close();
+            }
+            catch (IgniteCheckedException e) {
+                U.error(log, "Failed to close job: " + jobId, e);
+            }
+        }
+    }
+
+    /**
+     * Flushes all the outputs for the given job to remote nodes.
+     *
+     * @param jobId Job ID.
+     * @return Future.
+     */
+    public IgniteInternalFuture<?> flush(HadoopJobId jobId) {
+        HadoopShuffleJob job = jobs.get(jobId);
+
+        if (job == null)
+            return new GridFinishedFuture<>();
+
+        try {
+            return job.flush();
+        }
+        catch (IgniteCheckedException e) {
+            return new GridFinishedFuture<>(e);
+        }
+    }
+
+    /**
+     * @return Memory.
+     */
+    public GridUnsafeMemory memory() {
+        return mem;
+    }
+}
\ No newline at end of file
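
Remote delivery is acknowledged per message: onMessageReceived(...) feeds a
HadoopShuffleMessage into the target shuffle job and replies with a HadoopShuffleAck,
and flush(...) returns a future that completes only once every pending ack has
arrived (see HadoopShuffleJob.flush() in the next hunk). A caller-side sketch
(illustrative only):

    // Assuming 'shuffle' is the started HadoopShuffle component and 'jobId' a running job.
    IgniteInternalFuture<?> fut = shuffle.flush(jobId);

    fut.get(); // Blocks until every sent shuffle message for the job has been acked
               // (throws IgniteCheckedException if any send failed).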

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleAck.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleAck.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleAck.java
new file mode 100644
index 0000000..6013ec6
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleAck.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.shuffle;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
+import org.apache.ignite.internal.util.tostring.GridToStringInclude;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+/**
+ * Acknowledgement message.
+ */
+public class HadoopShuffleAck implements HadoopMessage {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    @GridToStringInclude
+    private long msgId;
+
+    /** */
+    @GridToStringInclude
+    private HadoopJobId jobId;
+
+    /**
+     *
+     */
+    public HadoopShuffleAck() {
+        // No-op.
+    }
+
+    /**
+     * @param msgId Message ID.
+     * @param jobId Job ID.
+     */
+    public HadoopShuffleAck(long msgId, HadoopJobId jobId) {
+        assert jobId != null;
+
+        this.msgId = msgId;
+        this.jobId = jobId;
+    }
+
+    /**
+     * @return Message ID.
+     */
+    public long id() {
+        return msgId;
+    }
+
+    /**
+     * @return Job ID.
+     */
+    public HadoopJobId jobId() {
+        return jobId;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeExternal(ObjectOutput out) throws IOException {
+        jobId.writeExternal(out);
+        out.writeLong(msgId);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        jobId = new HadoopJobId();
+
+        jobId.readExternal(in);
+        msgId = in.readLong();
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopShuffleAck.class, this);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleJob.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleJob.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleJob.java
new file mode 100644
index 0000000..b940c72
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleJob.java
@@ -0,0 +1,612 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.shuffle;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.IgniteInterruptedCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopJob;
+import org.apache.ignite.internal.processors.hadoop.HadoopPartitioner;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskOutput;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskType;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopPerformanceCounter;
+import org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopConcurrentHashMultimap;
+import org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopMultimap;
+import org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopSkipList;
+import org.apache.ignite.internal.util.GridUnsafe;
+import org.apache.ignite.internal.util.future.GridCompoundFuture;
+import org.apache.ignite.internal.util.future.GridFinishedFuture;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.apache.ignite.internal.util.io.GridUnsafeDataInput;
+import org.apache.ignite.internal.util.lang.GridClosureException;
+import org.apache.ignite.internal.util.lang.IgniteInClosure2X;
+import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.internal.util.worker.GridWorker;
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.thread.IgniteThread;
+
+import static org.apache.ignite.internal.processors.hadoop.HadoopJobProperty.PARTITION_HASHMAP_SIZE;
+import static org.apache.ignite.internal.processors.hadoop.HadoopJobProperty.SHUFFLE_REDUCER_NO_SORTING;
+import static org.apache.ignite.internal.processors.hadoop.HadoopJobProperty.get;
+
+/**
+ * Shuffle job.
+ */
+public class HadoopShuffleJob<T> implements AutoCloseable {
+    /** */
+    private static final int MSG_BUF_SIZE = 128 * 1024;
+
+    /** */
+    private final HadoopJob job;
+
+    /** */
+    private final GridUnsafeMemory mem;
+
+    /** */
+    private final boolean needPartitioner;
+
+    /** Collection of task contexts for each reduce task. */
+    private final Map<Integer, HadoopTaskContext> reducersCtx = new HashMap<>();
+
+    /** Reducer addresses. */
+    private T[] reduceAddrs;
+
+    /** Local reducer address. */
+    private final T locReduceAddr;
+
+    /** */
+    private final HadoopShuffleMessage[] msgs;
+
+    /** */
+    private final AtomicReferenceArray<HadoopMultimap> maps;
+
+    /** */
+    private volatile IgniteInClosure2X<T, HadoopShuffleMessage> io;
+
+    /** */
+    protected ConcurrentMap<Long, IgniteBiTuple<HadoopShuffleMessage, GridFutureAdapter<?>>> sentMsgs =
+        new ConcurrentHashMap<>();
+
+    /** */
+    private volatile GridWorker snd;
+
+    /** Latch to await remote reducer addresses. */
+    private final CountDownLatch ioInitLatch = new CountDownLatch(1);
+
+    /** Finished flag. Set on flush or close. */
+    private volatile boolean flushed;
+
+    /** */
+    private final IgniteLogger log;
+
+    /**
+     * @param locReduceAddr Local reducer address.
+     * @param log Logger.
+     * @param job Job.
+     * @param mem Memory.
+     * @param totalReducerCnt Total number of reducers in the job.
+     * @param locReducers Reducers that will run on the current node.
+     * @throws IgniteCheckedException If failed.
+     */
+    public HadoopShuffleJob(T locReduceAddr, IgniteLogger log, HadoopJob job, GridUnsafeMemory mem,
+        int totalReducerCnt, int[] locReducers) throws IgniteCheckedException {
+        this.locReduceAddr = locReduceAddr;
+        this.job = job;
+        this.mem = mem;
+        this.log = log.getLogger(HadoopShuffleJob.class);
+
+        if (!F.isEmpty(locReducers)) {
+            for (int rdc : locReducers) {
+                HadoopTaskInfo taskInfo = new HadoopTaskInfo(HadoopTaskType.REDUCE, job.id(), rdc, 0, null);
+
+                reducersCtx.put(rdc, job.getTaskContext(taskInfo));
+            }
+        }
+
+        needPartitioner = totalReducerCnt > 1;
+
+        maps = new AtomicReferenceArray<>(totalReducerCnt);
+        msgs = new HadoopShuffleMessage[totalReducerCnt];
+    }
+
+    /**
+     * @param reduceAddrs Addresses of reducers.
+     * @return {@code True} if addresses were initialized by this call.
+     */
+    public boolean initializeReduceAddresses(T[] reduceAddrs) {
+        if (this.reduceAddrs == null) {
+            this.reduceAddrs = reduceAddrs;
+
+            return true;
+        }
+
+        return false;
+    }
+
+    /**
+     * @return {@code True} if reducer addresses were initialized.
+     */
+    public boolean reducersInitialized() {
+        return reduceAddrs != null;
+    }
+
+    /**
+     * @param gridName Grid name.
+     * @param io IO Closure for sending messages.
+     */
+    @SuppressWarnings("BusyWait")
+    public void startSending(String gridName, IgniteInClosure2X<T, HadoopShuffleMessage> io) {
+        assert snd == null;
+        assert io != null;
+
+        this.io = io;
+
+        if (!flushed) {
+            snd = new GridWorker(gridName, "hadoop-shuffle-" + job.id(), log) {
+                @Override protected void body() throws InterruptedException {
+                    try {
+                        while (!isCancelled()) {
+                            Thread.sleep(5);
+
+                            collectUpdatesAndSend(false);
+                        }
+                    }
+                    catch (IgniteCheckedException e) {
+                        throw new IllegalStateException(e);
+                    }
+                }
+            };
+
+            new IgniteThread(snd).start();
+        }
+
+        ioInitLatch.countDown();
+    }
+
+    /**
+     * @param maps Maps.
+     * @param idx Index.
+     * @return Map.
+     */
+    private HadoopMultimap getOrCreateMap(AtomicReferenceArray<HadoopMultimap> maps, int idx) {
+        HadoopMultimap map = maps.get(idx);
+
+        if (map == null) { // Create new map.
+            map = get(job.info(), SHUFFLE_REDUCER_NO_SORTING, false) ?
+                new HadoopConcurrentHashMultimap(job.info(), mem, get(job.info(), PARTITION_HASHMAP_SIZE, 8 * 1024)) :
+                new HadoopSkipList(job.info(), mem);
+
+            if (!maps.compareAndSet(idx, null, map)) {
+                map.close();
+
+                return maps.get(idx);
+            }
+        }
+
+        return map;
+    }
+
+    /**
+     * @param msg Message.
+     * @throws IgniteCheckedException Exception.
+     */
+    public void onShuffleMessage(HadoopShuffleMessage msg) throws IgniteCheckedException {
+        assert msg.buffer() != null;
+        assert msg.offset() > 0;
+
+        HadoopTaskContext taskCtx = reducersCtx.get(msg.reducer());
+
+        HadoopPerformanceCounter perfCntr = HadoopPerformanceCounter.getCounter(taskCtx.counters(), null);
+
+        perfCntr.onShuffleMessage(msg.reducer(), U.currentTimeMillis());
+
+        HadoopMultimap map = getOrCreateMap(maps, msg.reducer());
+
+        // Add data from message to the map.
+        try (HadoopMultimap.Adder adder = map.startAdding(taskCtx)) {
+            final GridUnsafeDataInput dataInput = new GridUnsafeDataInput();
+            final UnsafeValue val = new UnsafeValue(msg.buffer());
+
+            msg.visit(new HadoopShuffleMessage.Visitor() {
+                /** */
+                private HadoopMultimap.Key key;
+
+                @Override public void onKey(byte[] buf, int off, int len) throws IgniteCheckedException {
+                    dataInput.bytes(buf, off, off + len);
+
+                    key = adder.addKey(dataInput, key);
+                }
+
+                @Override public void onValue(byte[] buf, int off, int len) {
+                    val.off = off;
+                    val.size = len;
+
+                    key.add(val);
+                }
+            });
+        }
+    }
+
+    /**
+     * @param ack Shuffle ack.
+     */
+    @SuppressWarnings("ConstantConditions")
+    public void onShuffleAck(HadoopShuffleAck ack) {
+        IgniteBiTuple<HadoopShuffleMessage, GridFutureAdapter<?>> tup = sentMsgs.get(ack.id());
+
+        if (tup != null)
+            tup.get2().onDone();
+        else
+            log.warning("Received shuffle ack for not registered shuffle id: " + ack);
+    }
+
+    /**
+     * Unsafe value.
+     */
+    private static class UnsafeValue implements HadoopMultimap.Value {
+        /** */
+        private final byte[] buf;
+
+        /** */
+        private int off;
+
+        /** */
+        private int size;
+
+        /**
+         * @param buf Buffer.
+         */
+        private UnsafeValue(byte[] buf) {
+            assert buf != null;
+
+            this.buf = buf;
+        }
+
+        /** {@inheritDoc} */
+        @Override public int size() {
+            return size;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void copyTo(long ptr) {
+            GridUnsafe.copyMemory(buf, GridUnsafe.BYTE_ARR_OFF + off, null, ptr, size);
+        }
+    }
+
+    /**
+     * Sends map updates to remote reducers.
+     */
+    private void collectUpdatesAndSend(boolean flush) throws IgniteCheckedException {
+        for (int i = 0; i < maps.length(); i++) {
+            HadoopMultimap map = maps.get(i);
+
+            if (map == null || locReduceAddr.equals(reduceAddrs[i]))
+                continue; // Skip empty map and local node.
+
+            if (msgs[i] == null)
+                msgs[i] = new HadoopShuffleMessage(job.id(), i, MSG_BUF_SIZE);
+
+            final int idx = i;
+
+            map.visit(false, new HadoopMultimap.Visitor() {
+                /** */
+                private long keyPtr;
+
+                /** */
+                private int keySize;
+
+                /** */
+                private boolean keyAdded;
+
+                /** {@inheritDoc} */
+                @Override public void onKey(long keyPtr, int keySize) {
+                    this.keyPtr = keyPtr;
+                    this.keySize = keySize;
+
+                    keyAdded = false;
+                }
+
+                private boolean tryAdd(long valPtr, int valSize) {
+                    HadoopShuffleMessage msg = msgs[idx];
+
+                    if (!keyAdded) { // Add key and value.
+                        int size = keySize + valSize;
+
+                        if (!msg.available(size, false))
+                            return false;
+
+                        msg.addKey(keyPtr, keySize);
+                        msg.addValue(valPtr, valSize);
+
+                        keyAdded = true;
+
+                        return true;
+                    }
+
+                    if (!msg.available(valSize, true))
+                        return false;
+
+                    msg.addValue(valPtr, valSize);
+
+                    return true;
+                }
+
+                /** {@inheritDoc} */
+                @Override public void onValue(long valPtr, int valSize) {
+                    if (tryAdd(valPtr, valSize))
+                        return;
+
+                    send(idx, keySize + valSize);
+
+                    keyAdded = false;
+
+                    if (!tryAdd(valPtr, valSize))
+                        throw new IllegalStateException();
+                }
+            });
+
+            if (flush && msgs[i].offset() != 0)
+                send(i, 0);
+        }
+    }
+
+    /**
+     * @param idx Index of message.
+     * @param newBufMinSize Min new buffer size.
+     */
+    private void send(final int idx, int newBufMinSize) {
+        final GridFutureAdapter<?> fut = new GridFutureAdapter<>();
+
+        HadoopShuffleMessage msg = msgs[idx];
+
+        final long msgId = msg.id();
+
+        IgniteBiTuple<HadoopShuffleMessage, GridFutureAdapter<?>> old = sentMsgs.putIfAbsent(msgId,
+            new IgniteBiTuple<HadoopShuffleMessage, GridFutureAdapter<?>>(msg, fut));
+
+        assert old == null;
+
+        try {
+            io.apply(reduceAddrs[idx], msg);
+        }
+        catch (GridClosureException e) {
+            fut.onDone(U.unwrap(e));
+        }
+
+        fut.listen(new IgniteInClosure<IgniteInternalFuture<?>>() {
+            @Override public void apply(IgniteInternalFuture<?> f) {
+                try {
+                    f.get();
+
+                    // Clean up the future from map only if there was no exception.
+                    // Otherwise flush() should fail.
+                    sentMsgs.remove(msgId);
+                }
+                catch (IgniteCheckedException e) {
+                    log.error("Failed to send message.", e);
+                }
+            }
+        });
+
+        msgs[idx] = newBufMinSize == 0 ? null : new HadoopShuffleMessage(job.id(), idx,
+            Math.max(MSG_BUF_SIZE, newBufMinSize));
+    }
+
+    /** {@inheritDoc} */
+    @Override public void close() throws IgniteCheckedException {
+        if (snd != null) {
+            snd.cancel();
+
+            try {
+                snd.join();
+            }
+            catch (InterruptedException e) {
+                throw new IgniteInterruptedCheckedException(e);
+            }
+        }
+
+        close(maps);
+    }
+
+    /**
+     * @param maps Maps.
+     */
+    private void close(AtomicReferenceArray<HadoopMultimap> maps) {
+        for (int i = 0; i < maps.length(); i++) {
+            HadoopMultimap map = maps.get(i);
+
+            if (map != null)
+                map.close();
+        }
+    }
+
+    /**
+     * @return Future.
+     */
+    @SuppressWarnings("unchecked")
+    public IgniteInternalFuture<?> flush() throws IgniteCheckedException {
+        if (log.isDebugEnabled())
+            log.debug("Flushing job " + job.id() + " on address " + locReduceAddr);
+
+        flushed = true;
+
+        if (maps.length() == 0)
+            return new GridFinishedFuture<>();
+
+        U.await(ioInitLatch);
+
+        GridWorker snd0 = snd;
+
+        if (snd0 != null) {
+            if (log.isDebugEnabled())
+                log.debug("Cancelling sender thread.");
+
+            snd0.cancel();
+
+            try {
+                snd0.join();
+
+                if (log.isDebugEnabled())
+                    log.debug("Finished waiting for sending thread to complete on shuffle job flush: " + job.id());
+            }
+            catch (InterruptedException e) {
+                throw new IgniteInterruptedCheckedException(e);
+            }
+        }
+
+        collectUpdatesAndSend(true); // With flush.
+
+        if (log.isDebugEnabled())
+            log.debug("Finished sending collected updates to remote reducers: " + job.id());
+
+        GridCompoundFuture fut = new GridCompoundFuture<>();
+
+        for (IgniteBiTuple<HadoopShuffleMessage, GridFutureAdapter<?>> tup : sentMsgs.values())
+            fut.add(tup.get2());
+
+        fut.markInitialized();
+
+        if (log.isDebugEnabled())
+            log.debug("Collected futures to compound futures for flush: " + sentMsgs.size());
+
+        return fut;
+    }
+
+    /**
+     * @param taskCtx Task context.
+     * @return Output.
+     * @throws IgniteCheckedException If failed.
+     */
+    public HadoopTaskOutput output(HadoopTaskContext taskCtx) throws IgniteCheckedException {
+        switch (taskCtx.taskInfo().type()) {
+            case MAP:
+                assert !job.info().hasCombiner() : "The output creation is allowed if combiner has not been defined.";
+
+            case COMBINE:
+                return new PartitionedOutput(taskCtx);
+
+            default:
+                throw new IllegalStateException("Illegal type: " + taskCtx.taskInfo().type());
+        }
+    }
+
+    /**
+     * @param taskCtx Task context.
+     * @return Input.
+     * @throws IgniteCheckedException If failed.
+     */
+    @SuppressWarnings("unchecked")
+    public HadoopTaskInput input(HadoopTaskContext taskCtx) throws IgniteCheckedException {
+        switch (taskCtx.taskInfo().type()) {
+            case REDUCE:
+                int reducer = taskCtx.taskInfo().taskNumber();
+
+                HadoopMultimap m = maps.get(reducer);
+
+                if (m != null)
+                    return m.input(taskCtx);
+
+                return new HadoopTaskInput() { // Empty input.
+                    @Override public boolean next() {
+                        return false;
+                    }
+
+                    @Override public Object key() {
+                        throw new IllegalStateException();
+                    }
+
+                    @Override public Iterator<?> values() {
+                        throw new IllegalStateException();
+                    }
+
+                    @Override public void close() {
+                        // No-op.
+                    }
+                };
+
+            default:
+                throw new IllegalStateException("Illegal type: " + taskCtx.taskInfo().type());
+        }
+    }
+
+    /**
+     * Partitioned output.
+     */
+    private class PartitionedOutput implements HadoopTaskOutput {
+        /** */
+        private final HadoopTaskOutput[] adders = new HadoopTaskOutput[maps.length()];
+
+        /** */
+        private HadoopPartitioner partitioner;
+
+        /** */
+        private final HadoopTaskContext taskCtx;
+
+        /**
+         * Constructor.
+         * @param taskCtx Task context.
+         */
+        private PartitionedOutput(HadoopTaskContext taskCtx) throws IgniteCheckedException {
+            this.taskCtx = taskCtx;
+
+            if (needPartitioner)
+                partitioner = taskCtx.partitioner();
+        }
+
+        /** {@inheritDoc} */
+        @Override public void write(Object key, Object val) throws IgniteCheckedException {
+            int part = 0;
+
+            if (partitioner != null) {
+                part = partitioner.partition(key, val, adders.length);
+
+                if (part < 0 || part >= adders.length)
+                    throw new IgniteCheckedException("Invalid partition: " + part);
+            }
+
+            HadoopTaskOutput out = adders[part];
+
+            if (out == null)
+                adders[part] = out = getOrCreateMap(maps, part).startAdding(taskCtx);
+
+            out.write(key, val);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void close() throws IgniteCheckedException {
+            for (HadoopTaskOutput adder : adders) {
+                if (adder != null)
+                    adder.close();
+            }
+        }
+    }
+}
\ No newline at end of file
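
The inner visitor in collectUpdatesAndSend(...) packs records with a simple rule: the
first value of a key is shipped together with the key, later values reference the
already-shipped key, and when a buffer fills up it is sent and the pair is retried
into a fresh buffer. A condensed standalone restatement of that rule (a sketch over
the classes above, not new API):

    /** Returns false when the buffer is full; the caller sends it and retries with keyAdded reset. */
    static boolean tryAdd(HadoopShuffleMessage msg, boolean keyAdded,
        long keyPtr, int keySize, long valPtr, int valSize) {
        if (!keyAdded) {
            if (!msg.available(keySize + valSize, false))
                return false;

            msg.addKey(keyPtr, keySize); // First value: ship the key along with it.
        }
        else if (!msg.available(valSize, true))
            return false;

        msg.addValue(valPtr, valSize);

        return true;
    }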

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleMessage.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleMessage.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleMessage.java
new file mode 100644
index 0000000..69dfe64
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleMessage.java
@@ -0,0 +1,242 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.shuffle;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage;
+import org.apache.ignite.internal.util.GridUnsafe;
+import org.apache.ignite.internal.util.tostring.GridToStringInclude;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+/**
+ * Shuffle message.
+ */
+public class HadoopShuffleMessage implements HadoopMessage {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private static final AtomicLong ids = new AtomicLong();
+
+    /** */
+    private static final byte MARKER_KEY = (byte)17;
+
+    /** */
+    private static final byte MARKER_VALUE = (byte)31;
+
+    /** */
+    @GridToStringInclude
+    private long msgId;
+
+    /** */
+    @GridToStringInclude
+    private HadoopJobId jobId;
+
+    /** */
+    @GridToStringInclude
+    private int reducer;
+
+    /** */
+    private byte[] buf;
+
+    /** */
+    @GridToStringInclude
+    private int off;
+
+    /**
+     *
+     */
+    public HadoopShuffleMessage() {
+        // No-op.
+    }
+
+    /**
+     * @param jobId Job ID.
+     * @param reducer Reducer index.
+     * @param size Buffer size in bytes.
+     */
+    public HadoopShuffleMessage(HadoopJobId jobId, int reducer, int size) {
+        assert jobId != null;
+
+        buf = new byte[size];
+
+        this.jobId = jobId;
+        this.reducer = reducer;
+
+        msgId = ids.incrementAndGet();
+    }
+
+    /**
+     * @return Message ID.
+     */
+    public long id() {
+        return msgId;
+    }
+
+    /**
+     * @return Job ID.
+     */
+    public HadoopJobId jobId() {
+        return jobId;
+    }
+
+    /**
+     * @return Reducer.
+     */
+    public int reducer() {
+        return reducer;
+    }
+
+    /**
+     * @return Buffer.
+     */
+    public byte[] buffer() {
+        return buf;
+    }
+
+    /**
+     * @return Offset.
+     */
+    public int offset() {
+        return off;
+    }
+
+    /**
+     * @param size Size.
+     * @param valOnly Whether only a value (without a key) will be added.
+     * @return {@code True} if this message can fit additional data of the given size.
+     */
+    public boolean available(int size, boolean valOnly) {
+        size += valOnly ? 5 : 10;
+
+        if (off + size > buf.length) {
+            if (off == 0) { // Resize if requested size is too big.
+                buf = new byte[size];
+
+                return true;
+            }
+
+            return false;
+        }
+
+        return true;
+    }
+
+    /**
+     * @param keyPtr Key pointer.
+     * @param keySize Key size.
+     */
+    public void addKey(long keyPtr, int keySize) {
+        add(MARKER_KEY, keyPtr, keySize);
+    }
+
+    /**
+     * @param valPtr Value pointer.
+     * @param valSize Value size.
+     */
+    public void addValue(long valPtr, int valSize) {
+        add(MARKER_VALUE, valPtr, valSize);
+    }
+
+    /**
+     * @param marker Marker.
+     * @param ptr Pointer.
+     * @param size Size.
+     */
+    private void add(byte marker, long ptr, int size) {
+        buf[off++] = marker;
+
+        GridUnsafe.putInt(buf, GridUnsafe.BYTE_ARR_OFF + off, size);
+
+        off += 4;
+
+        GridUnsafe.copyMemory(null, ptr, buf, GridUnsafe.BYTE_ARR_OFF + off, size);
+
+        off += size;
+    }
+
+    /**
+     * @param v Visitor.
+     * @throws IgniteCheckedException If the visitor fails.
+     */
+    public void visit(Visitor v) throws IgniteCheckedException {
+        for (int i = 0; i < off;) {
+            byte marker = buf[i++];
+
+            int size = GridUnsafe.getInt(buf, GridUnsafe.BYTE_ARR_OFF + i);
+
+            i += 4;
+
+            if (marker == MARKER_VALUE)
+                v.onValue(buf, i, size);
+            else if (marker == MARKER_KEY)
+                v.onKey(buf, i, size);
+            else
+                throw new IllegalStateException();
+
+            i += size;
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeExternal(ObjectOutput out) throws IOException {
+        jobId.writeExternal(out);
+        out.writeLong(msgId);
+        out.writeInt(reducer);
+        out.writeInt(off);
+        U.writeByteArray(out, buf);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        jobId = new HadoopJobId();
+
+        jobId.readExternal(in);
+        msgId = in.readLong();
+        reducer = in.readInt();
+        off = in.readInt();
+        buf = U.readByteArray(in);
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopShuffleMessage.class, this);
+    }
+
+    /**
+     * Visitor.
+     */
+    public interface Visitor {
+        /**
+         * @param buf Buffer.
+         * @param off Offset.
+         * @param len Length.
+         */
+        public void onKey(byte[] buf, int off, int len) throws IgniteCheckedException;
+
+        /**
+         * @param buf Buffer.
+         * @param off Offset.
+         * @param len Length.
+         */
+        public void onValue(byte[] buf, int off, int len) throws IgniteCheckedException;
+    }
+}
\ No newline at end of file
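
On the wire each record in buf is a 1-byte marker (17 for a key, 31 for a value), a
4-byte length written by GridUnsafe in native byte order, and the payload itself;
that is why available(...) reserves 10 extra bytes for a key/value pair and 5 for a
value alone. A standalone sketch of a reader for this layout (plain JDK, not Ignite
API):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    public class ShuffleBufWalkerSketch {
        /** Walks marker/length/payload records as produced by HadoopShuffleMessage.add(...). */
        static void walk(byte[] buf, int off) {
            // GridUnsafe.putInt(...) writes lengths in native byte order.
            ByteBuffer bb = ByteBuffer.wrap(buf, 0, off).order(ByteOrder.nativeOrder());

            while (bb.hasRemaining()) {
                byte marker = bb.get(); // 17 = key, 31 = value.
                int size = bb.getInt(); // Payload length.

                bb.position(bb.position() + size); // Skip payload; a real visitor would consume it.

                System.out.println((marker == 17 ? "key" : "value") + " of " + size + " bytes");
            }
        }
    }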

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopConcurrentHashMultimap.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopConcurrentHashMultimap.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopConcurrentHashMultimap.java
new file mode 100644
index 0000000..ffa7871
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopConcurrentHashMultimap.java
@@ -0,0 +1,616 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.shuffle.collections;
+
+import java.io.DataInput;
+import java.util.Random;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLongArray;
+import java.util.concurrent.atomic.AtomicReference;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
+import org.apache.ignite.internal.util.GridLongList;
+import org.apache.ignite.internal.util.GridRandom;
+import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
+import org.apache.ignite.internal.util.typedef.internal.A;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Multimap for map reduce intermediate results.
+ */
+public class HadoopConcurrentHashMultimap extends HadoopHashMultimapBase {
+    /** */
+    private final AtomicReference<State> state = new AtomicReference<>(State.READING_WRITING);
+
+    /** */
+    private volatile AtomicLongArray oldTbl;
+
+    /** */
+    private volatile AtomicLongArray newTbl;
+
+    /** */
+    private final AtomicInteger keys = new AtomicInteger();
+
+    /** */
+    private final CopyOnWriteArrayList<AdderImpl> adders = new CopyOnWriteArrayList<>();
+
+    /** */
+    private final AtomicInteger inputs = new AtomicInteger();
+
+    /**
+     * @param jobInfo Job info.
+     * @param mem Memory.
+     * @param cap Initial capacity.
+     */
+    public HadoopConcurrentHashMultimap(HadoopJobInfo jobInfo, GridUnsafeMemory mem, int cap) {
+        super(jobInfo, mem);
+
+        assert U.isPow2(cap);
+
+        newTbl = oldTbl = new AtomicLongArray(cap);
+    }
+
+    /**
+     * @return Number of keys.
+     */
+    public long keys() {
+        int res = keys.get();
+
+        for (AdderImpl adder : adders)
+            res += adder.locKeys.get();
+
+        return res;
+    }
+
+    /**
+     * @return Current table capacity.
+     */
+    @Override public int capacity() {
+        return oldTbl.length();
+    }
+
+    /**
+     * @param ctx Task context.
+     * @return Adder object.
+     */
+    @Override public Adder startAdding(HadoopTaskContext ctx) throws IgniteCheckedException {
+        if (inputs.get() != 0)
+            throw new IllegalStateException("Active inputs.");
+
+        if (state.get() == State.CLOSING)
+            throw new IllegalStateException("Closed.");
+
+        return new AdderImpl(ctx);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void close() {
+        assert inputs.get() == 0 : inputs.get();
+        assert adders.isEmpty() : adders.size();
+
+        state(State.READING_WRITING, State.CLOSING);
+
+        if (keys() == 0)
+            return;
+
+        super.close();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected long meta(int idx) {
+        return oldTbl.get(idx);
+    }
+
+    /**
+     * Incrementally visits all the keys and values in the map.
+     *
+     * @param ignoreLastVisited Flag indicating that visiting must be started from the beginning.
+     * @param v Visitor.
+     * @return {@code false} If visiting was impossible due to rehashing.
+     */
+    @Override public boolean visit(boolean ignoreLastVisited, Visitor v) throws IgniteCheckedException {
+        if (!state.compareAndSet(State.READING_WRITING, State.VISITING)) {
+            assert state.get() != State.CLOSING;
+
+            return false; // Can not visit while rehashing happens.
+        }
+
+        AtomicLongArray tbl0 = oldTbl;
+
+        for (int i = 0; i < tbl0.length(); i++) {
+            long meta = tbl0.get(i);
+
+            while (meta != 0) {
+                long valPtr = value(meta);
+
+                long lastVisited = ignoreLastVisited ? 0 : lastVisitedValue(meta);
+
+                if (valPtr != lastVisited) {
+                    v.onKey(key(meta), keySize(meta));
+
+                    lastVisitedValue(meta, valPtr); // Set it to the first value in chain.
+
+                    do {
+                        v.onValue(valPtr + 12, valueSize(valPtr));
+
+                        valPtr = nextValue(valPtr);
+                    }
+                    while (valPtr != lastVisited);
+                }
+
+                meta = collision(meta);
+            }
+        }
+
+        state(State.VISITING, State.READING_WRITING);
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopTaskInput input(HadoopTaskContext taskCtx) throws IgniteCheckedException {
+        inputs.incrementAndGet();
+
+        if (!adders.isEmpty())
+            throw new IllegalStateException("Active adders.");
+
+        State s = state.get();
+
+        if (s == State.CLOSING)
+            throw new IllegalStateException("Closed.");
+
+        assert s != State.REHASHING;
+
+        return new Input(taskCtx) {
+            @Override public void close() throws IgniteCheckedException {
+                if (inputs.decrementAndGet() < 0)
+                    throw new IllegalStateException();
+
+                super.close();
+            }
+        };
+    }
+
+    /**
+     * @param fromTbl Table.
+     */
+    private void rehashIfNeeded(AtomicLongArray fromTbl) {
+        if (fromTbl.length() == Integer.MAX_VALUE)
+            return;
+
+        long keys0 = keys();
+
+        if (keys0 < 3 * (fromTbl.length() >>> 2)) // Rehash only when the number of keys reaches 3/4 of the capacity.
+            return;
+
+        if (fromTbl != newTbl) // Check if someone else has already done the job.
+            return;
+
+        if (!state.compareAndSet(State.READING_WRITING, State.REHASHING)) {
+            assert state.get() != State.CLOSING; // Visiting is allowed, but we will not rehash.
+
+            return;
+        }
+
+        if (fromTbl != newTbl) { // Double check.
+            state(State.REHASHING, State.READING_WRITING); // Switch back.
+
+            return;
+        }
+
+        // Calculate new table capacity.
+        int newLen = fromTbl.length();
+
+        do {
+            newLen <<= 1;
+        }
+        while (newLen < keys0);
+
+        if (keys0 >= 3 * (newLen >>> 2)) // Still more than 3/4.
+            newLen <<= 1;
+
+        // This is our target table for rehashing.
+        AtomicLongArray toTbl = new AtomicLongArray(newLen);
+
+        // Make the new table visible before rehashing.
+        newTbl = toTbl;
+
+        // Rehash.
+        int newMask = newLen - 1;
+
+        long failedMeta = 0;
+
+        GridLongList collisions = new GridLongList(16);
+
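+        // Move entries cell by cell: read the root meta, relink its whole collision chain into
+        // the new table, then CAS the source cell to -1 to mark it as moved. If the CAS fails,
+        // new entries were prepended concurrently, so the cell is retried and only the
+        // not-yet-moved prefix of the chain is collected on the next pass.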
+        for (int i = 0; i < fromTbl.length(); i++) { // Scan source table.
+            long meta = fromTbl.get(i);
+
+            assert meta != -1;
+
+            if (meta == 0) { // No entry.
+                failedMeta = 0;
+
+                if (!fromTbl.compareAndSet(i, 0, -1)) // Mark as moved.
+                    i--; // Retry.
+
+                continue;
+            }
+
+            do { // Collect the collision chain entries up to the previously failed meta (or to 0).
+                collisions.add(meta);
+
+                meta = collision(meta);
+            }
+            while (meta != failedMeta);
+
+            do { // Go from the last to the first to avoid 'in-flight' state for meta entries.
+                meta = collisions.remove();
+
+                int addr = keyHash(meta) & newMask;
+
+                for (;;) { // Move meta entry to the new table.
+                    long toCollision = toTbl.get(addr);
+
+                    collision(meta, toCollision);
+
+                    if (toTbl.compareAndSet(addr, toCollision, meta))
+                        break;
+                }
+            }
+            while (!collisions.isEmpty());
+
+            // Here 'meta' will be a root pointer in old table.
+            if (!fromTbl.compareAndSet(i, meta, -1)) { // Try to mark as moved.
+                failedMeta = meta;
+
+                i--; // Retry the same address in table because new keys were added.
+            }
+            else
+                failedMeta = 0;
+        }
+
+        // Now old and new tables will be the same again.
+        oldTbl = toTbl;
+
+        state(State.REHASHING, State.READING_WRITING);
+    }
+
+    /**
+     * Switch state.
+     *
+     * @param oldState Expected state.
+     * @param newState New state.
+     */
+    private void state(State oldState, State newState) {
+        if (!state.compareAndSet(oldState, newState))
+            throw new IllegalStateException();
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @return Value pointer.
+     */
+    @Override protected long value(long meta) {
+        return mem.readLongVolatile(meta + 16);
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @param oldValPtr Old value.
+     * @param newValPtr New value.
+     * @return {@code true} If succeeded.
+     */
+    private boolean casValue(long meta, long oldValPtr, long newValPtr) {
+        return mem.casLong(meta + 16, oldValPtr, newValPtr);
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @return Collision pointer.
+     */
+    @Override protected long collision(long meta) {
+        return mem.readLongVolatile(meta + 24);
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @param collision Collision pointer.
+     */
+    @Override protected void collision(long meta, long collision) {
+        assert meta != collision : meta;
+
+        mem.writeLongVolatile(meta + 24, collision);
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @return Last visited value pointer.
+     */
+    private long lastVisitedValue(long meta) {
+        return mem.readLong(meta + 32);
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @param valPtr Last visited value pointer.
+     */
+    private void lastVisitedValue(long meta, long valPtr) {
+        mem.writeLong(meta + 32, valPtr);
+    }
+
+    /**
+     * Adder. Must not be shared between threads.
+     */
+    private class AdderImpl extends AdderBase {
+        /** */
+        private final Reader keyReader;
+
+        /** */
+        private final AtomicInteger locKeys = new AtomicInteger();
+
+        /** */
+        private final Random rnd = new GridRandom();
+
+        /**
+         * @param ctx Task context.
+         * @throws IgniteCheckedException If failed.
+         */
+        private AdderImpl(HadoopTaskContext ctx) throws IgniteCheckedException {
+            super(ctx);
+
+            keyReader = new Reader(keySer);
+
+            rehashIfNeeded(oldTbl);
+
+            adders.add(this);
+        }
+
+        /**
+         * @param in Data input.
+         * @param reuse Reusable key.
+         * @return Key.
+         * @throws IgniteCheckedException If failed.
+         */
+        @Override public Key addKey(DataInput in, @Nullable Key reuse) throws IgniteCheckedException {
+            KeyImpl k = reuse == null ? new KeyImpl() : (KeyImpl)reuse;
+
+            k.tmpKey = keySer.read(in, k.tmpKey);
+
+            k.meta = add(k.tmpKey, null);
+
+            return k;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void write(Object key, Object val) throws IgniteCheckedException {
+            A.notNull(val, "val");
+
+            add(key, val);
+        }
+
+        /**
+         * @param tbl Table.
+         */
+        private void incrementKeys(AtomicLongArray tbl) {
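+            // The adder is not shared between threads, so a lazy write is enough here;
+            // concurrent keys() readers tolerate a slightly stale count.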
+            locKeys.lazySet(locKeys.get() + 1);
+
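+            // Check the load factor only with probability 512 / tbl.length() so that the
+            // relatively expensive keys() scan is amortized across insertions.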
+            if (rnd.nextInt(tbl.length()) < 512)
+                rehashIfNeeded(tbl);
+        }
+
+        /**
+         * @param keyHash Key hash.
+         * @param keySize Key size.
+         * @param keyPtr Key pointer.
+         * @param valPtr Value page pointer.
+         * @param collisionPtr Pointer to meta with hash collision.
+         * @param lastVisitedVal Last visited value pointer.
+         * @return Created meta page pointer.
+         */
+        private long createMeta(int keyHash, int keySize, long keyPtr, long valPtr, long collisionPtr, long lastVisitedVal) {
+            long meta = allocate(40);
+
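+            // 40-byte meta layout: key hash (4), key size (4), key pointer (8),
+            // value chain head (8), collision pointer (8), last visited value (8).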
+            mem.writeInt(meta, keyHash);
+            mem.writeInt(meta + 4, keySize);
+            mem.writeLong(meta + 8, keyPtr);
+            mem.writeLong(meta + 16, valPtr);
+            mem.writeLong(meta + 24, collisionPtr);
+            mem.writeLong(meta + 32, lastVisitedVal);
+
+            return meta;
+        }
+
+        /**
+         * @param key Key.
+         * @param val Value.
+         * @return Updated or created meta page pointer.
+         * @throws IgniteCheckedException If failed.
+         */
+        private long add(Object key, @Nullable Object val) throws IgniteCheckedException {
+            AtomicLongArray tbl = oldTbl;
+
+            int keyHash = U.hash(key.hashCode());
+
+            long newMetaPtr = 0;
+
+            long valPtr = 0;
+
+            if (val != null) {
+                valPtr = write(12, val, valSer);
+                int valSize = writtenSize() - 12;
+
+                valueSize(valPtr, valSize);
+            }
+
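+            // Lock-free insert: try to CAS the new meta entry into the root cell of the table.
+            // While rehashing is in progress a cell may be marked as moved (-1), or the key may
+            // already reside in the other table, so the loop can switch between old and new tables.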
+            for (AtomicLongArray old = null;;) {
+                int addr = keyHash & (tbl.length() - 1);
+
+                long metaPtrRoot = tbl.get(addr); // Read root meta pointer at this address.
+
+                if (metaPtrRoot == -1) { // The cell was already moved by rehashing.
+                    AtomicLongArray n = newTbl; // Need to read newTbl first here.
+                    AtomicLongArray o = oldTbl;
+
+                    tbl = tbl == o ? n : o; // Trying to get the oldest table but newer than ours.
+
+                    old = null;
+
+                    continue;
+                }
+
+                if (metaPtrRoot != 0) { // Not empty slot.
+                    long metaPtr = metaPtrRoot;
+
+                    do { // Scan all the collisions.
+                        if (keyHash(metaPtr) == keyHash && key.equals(keyReader.readKey(metaPtr))) { // Found key.
+                            if (newMetaPtr != 0)  // Deallocate new meta if one was allocated.
+                                localDeallocate(key(newMetaPtr)); // Key was allocated first, so rewind to its pointer.
+
+                            if (valPtr != 0) { // Add value if it exists.
+                                long nextValPtr;
+
+                                // Values are linked to each other in a stack-like structure.
+                                // Replace the current head value in meta with ours and link the old head as next.
+                                do {
+                                    nextValPtr = value(metaPtr);
+
+                                    nextValue(valPtr, nextValPtr);
+                                }
+                                while (!casValue(metaPtr, nextValPtr, valPtr));
+                            }
+
+                            return metaPtr;
+                        }
+
+                        metaPtr = collision(metaPtr);
+                    }
+                    while (metaPtr != 0);
+
+                    // Here we did not find our key, need to check if it was moved by rehashing to the new table.
+                    if (old == null) { // If the old table is already set, we will just keep trying to update it.
+                        AtomicLongArray n = newTbl;
+
+                        if (n != tbl) { // Rehashing is in progress: search the new table but remember the old one.
+                            old = tbl;
+                            tbl = n;
+
+                            continue;
+                        }
+                    }
+                }
+
+                if (old != null) { // We checked the new table and did not find our key there either.
+                    tbl = old; // Try to add new key to the old table.
+
+                    addr = keyHash & (tbl.length() - 1);
+
+                    old = null;
+                }
+
+                if (newMetaPtr == 0) { // Allocate new meta page.
+                    long keyPtr = write(0, key, keySer);
+                    int keySize = writtenSize();
+
+                    if (valPtr != 0)
+                        nextValue(valPtr, 0);
+
+                    newMetaPtr = createMeta(keyHash, keySize, keyPtr, valPtr, metaPtrRoot, 0);
+                }
+                else // Update new meta with root pointer collision.
+                    collision(newMetaPtr, metaPtrRoot);
+
+                if (tbl.compareAndSet(addr, metaPtrRoot, newMetaPtr)) { // Try to replace root pointer with new one.
+                    incrementKeys(tbl);
+
+                    return newMetaPtr;
+                }
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public void close() throws IgniteCheckedException {
+            if (!adders.remove(this))
+                throw new IllegalStateException();
+
+            keys.addAndGet(locKeys.get()); // There is a race here, so #keys() can briefly return an inaccurate result, which is acceptable.
+
+            super.close();
+        }
+
+        /**
+         * Key.
+         */
+        private class KeyImpl implements Key {
+            /** */
+            private long meta;
+
+            /** */
+            private Object tmpKey;
+
+            /**
+             * @return Meta pointer for the key.
+             */
+            public long address() {
+                return meta;
+            }
+
+            /**
+             * @param val Value.
+             */
+            @Override public void add(Value val) {
+                int size = val.size();
+
+                long valPtr = allocate(size + 12);
+
+                val.copyTo(valPtr + 12);
+
+                valueSize(valPtr, size);
+
+                long nextVal;
+
+                do {
+                    nextVal = value(meta);
+
+                    nextValue(valPtr, nextVal);
+                }
+                while (!casValue(meta, nextVal, valPtr));
+            }
+        }
+    }
+
+    /**
+     * Current map state.
+     */
+    private enum State {
+        /** */
+        REHASHING,
+
+        /** */
+        VISITING,
+
+        /** */
+        READING_WRITING,
+
+        /** */
+        CLOSING
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimap.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimap.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimap.java
new file mode 100644
index 0000000..c32e9af
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimap.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.shuffle.collections;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
+import org.apache.ignite.internal.util.typedef.internal.A;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+/**
+ * Hash multimap.
+ */
+public class HadoopHashMultimap extends HadoopHashMultimapBase {
+    /** */
+    private long[] tbl;
+
+    /** */
+    private int keys;
+
+    /**
+     * @param jobInfo Job info.
+     * @param mem Memory.
+     * @param cap Initial capacity.
+     */
+    public HadoopHashMultimap(HadoopJobInfo jobInfo, GridUnsafeMemory mem, int cap) {
+        super(jobInfo, mem);
+
+        assert U.isPow2(cap) : cap;
+
+        tbl = new long[cap];
+    }
+
+    /** {@inheritDoc} */
+    @Override public Adder startAdding(HadoopTaskContext ctx) throws IgniteCheckedException {
+        return new AdderImpl(ctx);
+    }
+
+    /**
+     * Rehash.
+     */
+    private void rehash() {
+        long[] newTbl = new long[tbl.length << 1];
+
+        int newMask = newTbl.length - 1;
+
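+        // Relink every collision chain into the doubled table; entries are prepended,
+        // so chains may come out reversed, which is harmless for a multimap.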
+        for (long meta : tbl) {
+            while (meta != 0) {
+                long collision = collision(meta);
+
+                int idx = keyHash(meta) & newMask;
+
+                collision(meta, newTbl[idx]);
+
+                newTbl[idx] = meta;
+
+                meta = collision;
+            }
+        }
+
+        tbl = newTbl;
+    }
+
+    /**
+     * @return Keys count.
+     */
+    public int keys() {
+        return keys;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int capacity() {
+        return tbl.length;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected long meta(int idx) {
+        return tbl[idx];
+    }
+
+    /**
+     * Adder.
+     */
+    private class AdderImpl extends AdderBase {
+        /** */
+        private final Reader keyReader;
+
+        /**
+         * @param ctx Task context.
+         * @throws IgniteCheckedException If failed.
+         */
+        protected AdderImpl(HadoopTaskContext ctx) throws IgniteCheckedException {
+            super(ctx);
+
+            keyReader = new Reader(keySer);
+        }
+
+        /**
+         * @param keyHash Key hash.
+         * @param keySize Key size.
+         * @param keyPtr Key pointer.
+         * @param valPtr Value page pointer.
+         * @param collisionPtr Pointer to meta with hash collision.
+         * @return Created meta page pointer.
+         */
+        private long createMeta(int keyHash, int keySize, long keyPtr, long valPtr, long collisionPtr) {
+            long meta = allocate(32);
+
+            mem.writeInt(meta, keyHash);
+            mem.writeInt(meta + 4, keySize);
+            mem.writeLong(meta + 8, keyPtr);
+            mem.writeLong(meta + 16, valPtr);
+            mem.writeLong(meta + 24, collisionPtr);
+
+            return meta;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void write(Object key, Object val) throws IgniteCheckedException {
+            A.notNull(val, "val");
+
+            int keyHash = U.hash(key.hashCode());
+
+            // Write value.
+            long valPtr = write(12, val, valSer);
+            int valSize = writtenSize() - 12;
+
+            valueSize(valPtr, valSize);
+
+            // Find position in table.
+            int idx = keyHash & (tbl.length - 1);
+
+            long meta = tbl[idx];
+
+            // Search for our key in collisions.
+            while (meta != 0) {
+                if (keyHash(meta) == keyHash && key.equals(keyReader.readKey(meta))) { // Found key.
+                    nextValue(valPtr, value(meta));
+
+                    value(meta, valPtr);
+
+                    return;
+                }
+
+                meta = collision(meta);
+            }
+
+            // Write key.
+            long keyPtr = write(0, key, keySer);
+            int keySize = writtenSize();
+
+            nextValue(valPtr, 0);
+
+            tbl[idx] = createMeta(keyHash, keySize, keyPtr, valPtr, tbl[idx]);
+
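+            // Grow the table once the number of keys exceeds 3/4 of the capacity.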
+            if (++keys > (tbl.length >>> 2) * 3)
+                rehash();
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimapBase.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimapBase.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimapBase.java
new file mode 100644
index 0000000..8d9b3c3
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimapBase.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.shuffle.collections;
+
+import java.util.Iterator;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopSerialization;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
+import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
+
+/**
+ * Base class for hash multimaps.
+ */
+public abstract class HadoopHashMultimapBase extends HadoopMultimapBase {
+    /**
+     * @param jobInfo Job info.
+     * @param mem Memory.
+     */
+    protected HadoopHashMultimapBase(HadoopJobInfo jobInfo, GridUnsafeMemory mem) {
+        super(jobInfo, mem);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean visit(boolean ignoreLastVisited, Visitor v) throws IgniteCheckedException {
+        throw new UnsupportedOperationException("visit");
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopTaskInput input(HadoopTaskContext taskCtx) throws IgniteCheckedException {
+        return new Input(taskCtx);
+    }
+
+    /**
+     * @return Hash table capacity.
+     */
+    public abstract int capacity();
+
+    /**
+     * @param idx Index in hash table.
+     * @return Meta page pointer.
+     */
+    protected abstract long meta(int idx);
+
+    /**
+     * @param meta Meta pointer.
+     * @return Key hash.
+     */
+    protected int keyHash(long meta) {
+        return mem.readInt(meta);
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @return Key size.
+     */
+    protected int keySize(long meta) {
+        return mem.readInt(meta + 4);
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @return Key pointer.
+     */
+    protected long key(long meta) {
+        return mem.readLong(meta + 8);
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @return Value pointer.
+     */
+    protected long value(long meta) {
+        return mem.readLong(meta + 16);
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @param val Value pointer.
+     */
+    protected void value(long meta, long val) {
+        mem.writeLong(meta + 16, val);
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @return Collision pointer.
+     */
+    protected long collision(long meta) {
+        return mem.readLong(meta + 24);
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @param collision Collision pointer.
+     */
+    protected void collision(long meta, long collision) {
+        assert meta != collision : meta;
+
+        mem.writeLong(meta + 24, collision);
+    }
+
+    /**
+     * Reader for key and value.
+     */
+    protected class Reader extends ReaderBase {
+        /**
+         * @param ser Serialization.
+         */
+        protected Reader(HadoopSerialization ser) {
+            super(ser);
+        }
+
+        /**
+         * @param meta Meta pointer.
+         * @return Key.
+         */
+        public Object readKey(long meta) {
+            assert meta > 0 : meta;
+
+            try {
+                return read(key(meta), keySize(meta));
+            }
+            catch (IgniteCheckedException e) {
+                throw new IgniteException(e);
+            }
+        }
+    }
+
+    /**
+     * Task input.
+     */
+    protected class Input implements HadoopTaskInput {
+        /** */
+        private int idx = -1;
+
+        /** */
+        private long metaPtr;
+
+        /** */
+        private final int cap;
+
+        /** */
+        private final Reader keyReader;
+
+        /** */
+        private final Reader valReader;
+
+        /**
+         * @param taskCtx Task context.
+         * @throws IgniteCheckedException If failed.
+         */
+        public Input(HadoopTaskContext taskCtx) throws IgniteCheckedException {
+            cap = capacity();
+
+            keyReader = new Reader(taskCtx.keySerialization());
+            valReader = new Reader(taskCtx.valueSerialization());
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean next() {
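+            // First advance along the current collision chain, then scan the table
+            // for the next non-empty bucket.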
+            if (metaPtr != 0) {
+                metaPtr = collision(metaPtr);
+
+                if (metaPtr != 0)
+                    return true;
+            }
+
+            while (++idx < cap) { // Scan table.
+                metaPtr = meta(idx);
+
+                if (metaPtr != 0)
+                    return true;
+            }
+
+            return false;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Object key() {
+            return keyReader.readKey(metaPtr);
+        }
+
+        /** {@inheritDoc} */
+        @Override public Iterator<?> values() {
+            return new ValueIterator(value(metaPtr), valReader);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void close() throws IgniteCheckedException {
+            keyReader.close();
+            valReader.close();
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimap.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimap.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimap.java
new file mode 100644
index 0000000..5b71c47
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimap.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.shuffle.collections;
+
+import java.io.DataInput;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskOutput;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Multimap for Hadoop intermediate results.
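+ * <p>
+ * Typical lifecycle (a sketch; {@code map}, {@code ctx} and {@code process} are placeholders):
+ * <pre>
+ * HadoopMultimap.Adder adder = map.startAdding(ctx);
+ *
+ * adder.write(key, val);
+ *
+ * adder.close();
+ *
+ * HadoopTaskInput in = map.input(ctx);
+ *
+ * while (in.next())
+ *     process(in.key(), in.values());
+ *
+ * in.close();
+ * </pre>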
+ */
+@SuppressWarnings("PublicInnerClass")
+public interface HadoopMultimap extends AutoCloseable {
+    /**
+     * Incrementally visits all the keys and values in the map.
+     *
+     * @param ignoreLastVisited Flag indicating that visiting must be started from the beginning.
+     * @param v Visitor.
+     * @return {@code false} If visiting was impossible.
+     */
+    public boolean visit(boolean ignoreLastVisited, Visitor v) throws IgniteCheckedException;
+
+    /**
+     * @param ctx Task context.
+     * @return Adder.
+     * @throws IgniteCheckedException If failed.
+     */
+    public Adder startAdding(HadoopTaskContext ctx) throws IgniteCheckedException;
+
+    /**
+     * @param taskCtx Task context.
+     * @return Task input.
+     * @throws IgniteCheckedException If failed.
+     */
+    public HadoopTaskInput input(HadoopTaskContext taskCtx)
+        throws IgniteCheckedException;
+
+    /** {@inheritDoc} */
+    @Override public void close();
+
+    /**
+     * Adder.
+     */
+    public interface Adder extends HadoopTaskOutput {
+        /**
+         * @param in Data input.
+         * @param reuse Reusable key.
+         * @return Key.
+         * @throws IgniteCheckedException If failed.
+         */
+        public Key addKey(DataInput in, @Nullable Key reuse) throws IgniteCheckedException;
+    }
+
+    /**
+     * Key add values to.
+     */
+    public interface Key {
+        /**
+         * @param val Value.
+         */
+        public void add(Value val);
+    }
+
+    /**
+     * Value.
+     */
+    public interface Value {
+        /**
+         * @return Size in bytes.
+         */
+        public int size();
+
+        /**
+         * @param ptr Pointer.
+         */
+        public void copyTo(long ptr);
+    }
+
+    /**
+     * Key and values visitor.
+     */
+    public interface Visitor {
+        /**
+         * @param keyPtr Key pointer.
+         * @param keySize Key size.
+         */
+        public void onKey(long keyPtr, int keySize) throws IgniteCheckedException;
+
+        /**
+         * @param valPtr Value pointer.
+         * @param valSize Value size.
+         */
+        public void onValue(long valPtr, int valSize) throws IgniteCheckedException;
+    }
+}
\ No newline at end of file


http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIo.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIo.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIo.java
new file mode 100644
index 0000000..b0a4135
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIo.java
@@ -0,0 +1,624 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.igfs;
+
+import java.io.BufferedOutputStream;
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import org.apache.commons.logging.Log;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.igfs.IgfsException;
+import org.apache.ignite.internal.GridLoggerProxy;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.IgniteInterruptedCheckedException;
+import org.apache.ignite.internal.igfs.common.IgfsControlResponse;
+import org.apache.ignite.internal.igfs.common.IgfsDataInputStream;
+import org.apache.ignite.internal.igfs.common.IgfsDataOutputStream;
+import org.apache.ignite.internal.igfs.common.IgfsIpcCommand;
+import org.apache.ignite.internal.igfs.common.IgfsMarshaller;
+import org.apache.ignite.internal.igfs.common.IgfsMessage;
+import org.apache.ignite.internal.igfs.common.IgfsStreamControlRequest;
+import org.apache.ignite.internal.util.GridConcurrentHashSet;
+import org.apache.ignite.internal.util.GridStripedLock;
+import org.apache.ignite.internal.util.ipc.IpcEndpoint;
+import org.apache.ignite.internal.util.ipc.IpcEndpointFactory;
+import org.apache.ignite.internal.util.ipc.shmem.IpcOutOfSystemResourcesException;
+import org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.jetbrains.annotations.Nullable;
+import org.jsr166.ConcurrentHashMap8;
+
+/**
+ * IO layer implementation based on blocking IPC streams.
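+ * <p>
+ * Instances are cached per endpoint address and reference-counted: {@link #get(Log, String)}
+ * returns a started instance with its usage count incremented, and every user must call
+ * {@link #release()} when done. The last release stops the IO and evicts it from the cache.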
+ */
+@SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
+public class HadoopIgfsIpcIo implements HadoopIgfsIo {
+    /** Logger. */
+    private final Log log;
+
+    /** Request futures map. */
+    private ConcurrentMap<Long, HadoopIgfsFuture> reqMap =
+        new ConcurrentHashMap8<>();
+
+    /** Request ID counter. */
+    private AtomicLong reqIdCnt = new AtomicLong();
+
+    /** Endpoint. */
+    private IpcEndpoint endpoint;
+
+    /** Endpoint output stream. */
+    private IgfsDataOutputStream out;
+
+    /** Protocol. */
+    private final IgfsMarshaller marsh;
+
+    /** Client reader thread. */
+    private Thread reader;
+
+    /** Lock for graceful shutdown. */
+    private final ReadWriteLock busyLock = new ReentrantReadWriteLock();
+
+    /** Stopping flag. */
+    private volatile boolean stopping;
+
+    /** Server endpoint address. */
+    private final String endpointAddr;
+
+    /** Number of open file system sessions. */
+    private final AtomicInteger activeCnt = new AtomicInteger(1);
+
+    /** Event listeners. */
+    private final Collection<HadoopIgfsIpcIoListener> lsnrs =
+        new GridConcurrentHashSet<>();
+
+    /** Cached connections. */
+    private static final ConcurrentMap<String, HadoopIgfsIpcIo> ipcCache =
+        new ConcurrentHashMap8<>();
+
+    /** Striped lock that prevents multiple instance creation in {@link #get(Log, String)}. */
+    private static final GridStripedLock initLock = new GridStripedLock(32);
+
+    /**
+     * @param endpointAddr Endpoint.
+     * @param marsh Protocol.
+     * @param log Logger to use.
+     */
+    public HadoopIgfsIpcIo(String endpointAddr, IgfsMarshaller marsh, Log log) {
+        assert endpointAddr != null;
+        assert marsh != null;
+
+        this.endpointAddr = endpointAddr;
+        this.marsh = marsh;
+        this.log = log;
+    }
+
+    /**
+     * Returns a started and valid instance of this class
+     * for a given endpoint.
+     *
+     * @param log Logger to use for new instance.
+     * @param endpoint Endpoint string.
+     * @return New or existing cached instance, which is started and operational.
+     * @throws IOException If new instance was created but failed to start.
+     */
+    public static HadoopIgfsIpcIo get(Log log, String endpoint) throws IOException {
+        while (true) {
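+            // Loop until we either acquire a cached instance or publish a freshly started one;
+            // acquire() fails when the cached instance is being concurrently closed.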
+            HadoopIgfsIpcIo clientIo = ipcCache.get(endpoint);
+
+            if (clientIo != null) {
+                if (clientIo.acquire())
+                    return clientIo;
+                else
+                    // If concurrent close.
+                    ipcCache.remove(endpoint, clientIo);
+            }
+            else {
+                Lock lock = initLock.getLock(endpoint);
+
+                lock.lock();
+
+                try {
+                    clientIo = ipcCache.get(endpoint);
+
+                    if (clientIo != null) { // Perform double check.
+                        if (clientIo.acquire())
+                            return clientIo;
+                        else
+                            // If concurrent close.
+                            ipcCache.remove(endpoint, clientIo);
+                    }
+
+                    // Otherwise try creating a new one.
+                    clientIo = new HadoopIgfsIpcIo(endpoint, new IgfsMarshaller(), log);
+
+                    try {
+                        clientIo.start();
+                    }
+                    catch (IgniteCheckedException e) {
+                        throw new IOException(e.getMessage(), e);
+                    }
+
+                    HadoopIgfsIpcIo old = ipcCache.putIfAbsent(endpoint, clientIo);
+
+                    // Put in exclusive lock.
+                    assert old == null;
+
+                    return clientIo;
+                }
+                finally {
+                    lock.unlock();
+                }
+            }
+        }
+    }
+
+    /**
+     * Increases usage count for this instance.
+     *
+     * @return {@code true} If the instance was acquired (usage count was greater than zero).
+     */
+    private boolean acquire() {
+        while (true) {
+            int cnt = activeCnt.get();
+
+            if (cnt == 0) {
+                if (log.isDebugEnabled())
+                    log.debug("IPC IO not acquired (count was 0): " + this);
+
+                return false;
+            }
+
+            // Need to make sure that no-one decremented count in between.
+            if (activeCnt.compareAndSet(cnt, cnt + 1)) {
+                if (log.isDebugEnabled())
+                    log.debug("IPC IO acquired: " + this);
+
+                return true;
+            }
+        }
+    }
+
+    /**
+     * Releases this instance, decrementing usage count.
+     * <p>
+     * If usage count becomes zero, the instance is stopped
+     * and removed from cache.
+     */
+    public void release() {
+        while (true) {
+            int cnt = activeCnt.get();
+
+            if (cnt == 0) {
+                if (log.isDebugEnabled())
+                    log.debug("IPC IO not released (count was 0): " + this);
+
+                return;
+            }
+
+            if (activeCnt.compareAndSet(cnt, cnt - 1)) {
+                if (cnt == 1) {
+                    ipcCache.remove(endpointAddr, this);
+
+                    if (log.isDebugEnabled())
+                        log.debug("IPC IO stopping as unused: " + this);
+
+                    stop();
+                }
+                else if (log.isDebugEnabled())
+                    log.debug("IPC IO released: " + this);
+
+                return;
+            }
+        }
+    }
+
+    /**
+     * Closes this IO instance, removing it from cache.
+     */
+    public void forceClose() {
+        if (ipcCache.remove(endpointAddr, this))
+            stop();
+    }
+
+    /**
+     * Starts the IO.
+     *
+     * @throws IgniteCheckedException If failed to connect the endpoint.
+     */
+    private void start() throws IgniteCheckedException {
+        boolean success = false;
+
+        try {
+            endpoint = IpcEndpointFactory.connectEndpoint(
+                endpointAddr, new GridLoggerProxy(new HadoopIgfsJclLogger(log), null, null, ""));
+
+            out = new IgfsDataOutputStream(new BufferedOutputStream(endpoint.outputStream()));
+
+            reader = new ReaderThread();
+
+            // Required for Hadoop 2.x
+            reader.setDaemon(true);
+
+            reader.start();
+
+            success = true;
+        }
+        catch (IgniteCheckedException e) {
+            IpcOutOfSystemResourcesException resEx = e.getCause(IpcOutOfSystemResourcesException.class);
+
+            if (resEx != null)
+                throw new IgniteCheckedException(IpcSharedMemoryServerEndpoint.OUT_OF_RESOURCES_MSG, resEx);
+
+            throw e;
+        }
+        finally {
+            if (!success)
+                stop();
+        }
+    }
+
+    /**
+     * Shuts down the IO. No further send requests will be accepted, and all pending futures will be failed.
+     * Close listeners will be invoked as if the connection were closed by the server.
+     */
+    private void stop() {
+        close0(null);
+
+        if (reader != null) {
+            try {
+                U.interrupt(reader);
+                U.join(reader);
+
+                reader = null;
+            }
+            catch (IgniteInterruptedCheckedException ignored) {
+                Thread.currentThread().interrupt();
+
+                log.warn("Got interrupted while waiting for reader thread to shut down (will return).");
+            }
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void addEventListener(HadoopIgfsIpcIoListener lsnr) {
+        if (!busyLock.readLock().tryLock()) {
+            lsnr.onClose();
+
+            return;
+        }
+
+        boolean invokeNow = false;
+
+        try {
+            invokeNow = stopping;
+
+            if (!invokeNow)
+                lsnrs.add(lsnr);
+        }
+        finally {
+            busyLock.readLock().unlock();
+
+            if (invokeNow)
+                lsnr.onClose();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void removeEventListener(HadoopIgfsIpcIoListener lsnr) {
+        lsnrs.remove(lsnr);
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteInternalFuture<IgfsMessage> send(IgfsMessage msg) throws IgniteCheckedException {
+        return send(msg, null, 0, 0);
+    }
+
+    /** {@inheritDoc} */
+    @Override public <T> IgniteInternalFuture<T> send(IgfsMessage msg, @Nullable byte[] outBuf, int outOff,
+        int outLen) throws IgniteCheckedException {
+        assert outBuf == null || msg.command() == IgfsIpcCommand.READ_BLOCK;
+
+        if (!busyLock.readLock().tryLock())
+            throw new HadoopIgfsCommunicationException("Failed to send message (client is being concurrently " +
+                "closed).");
+
+        try {
+            if (stopping)
+                throw new HadoopIgfsCommunicationException("Failed to send message (client is being concurrently " +
+                    "closed).");
+
+            long reqId = reqIdCnt.getAndIncrement();
+
+            HadoopIgfsFuture<T> fut = new HadoopIgfsFuture<>();
+
+            fut.outputBuffer(outBuf);
+            fut.outputOffset(outOff);
+            fut.outputLength(outLen);
+            fut.read(msg.command() == IgfsIpcCommand.READ_BLOCK);
+
+            HadoopIgfsFuture oldFut = reqMap.putIfAbsent(reqId, fut);
+
+            assert oldFut == null;
+
+            if (log.isDebugEnabled())
+                log.debug("Sending IGFS message [reqId=" + reqId + ", msg=" + msg + ']');
+
+            byte[] hdr = IgfsMarshaller.createHeader(reqId, msg.command());
+
+            IgniteCheckedException err = null;
+
+            try {
+                synchronized (this) {
+                    marsh.marshall(msg, hdr, out);
+
+                    out.flush(); // Blocking operation + sometimes system call.
+                }
+            }
+            catch (IgniteCheckedException e) {
+                err = e;
+            }
+            catch (IOException e) {
+                err = new HadoopIgfsCommunicationException(e);
+            }
+
+            if (err != null) {
+                reqMap.remove(reqId, fut);
+
+                fut.onDone(err);
+            }
+
+            return fut;
+        }
+        finally {
+            busyLock.readLock().unlock();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void sendPlain(IgfsMessage msg) throws IgniteCheckedException {
+        if (!busyLock.readLock().tryLock())
+            throw new HadoopIgfsCommunicationException("Failed to send message (client is being " +
+                "concurrently closed).");
+
+        try {
+            if (stopping)
+                throw new HadoopIgfsCommunicationException("Failed to send message (client is being concurrently closed).");
+
+            assert msg.command() == IgfsIpcCommand.WRITE_BLOCK;
+
+            IgfsStreamControlRequest req = (IgfsStreamControlRequest)msg;
+
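+            // WRITE_BLOCK is fire-and-forget: request ID -1 tells the server not to reply on
+            // success, and the stream ID and data length are packed directly into the header.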
+            byte[] hdr = IgfsMarshaller.createHeader(-1, IgfsIpcCommand.WRITE_BLOCK);
+
+            U.longToBytes(req.streamId(), hdr, 12);
+            U.intToBytes(req.length(), hdr, 20);
+
+            synchronized (this) {
+                out.write(hdr);
+                out.write(req.data(), (int)req.position(), req.length());
+
+                out.flush();
+            }
+        }
+        catch (IOException e) {
+            throw new HadoopIgfsCommunicationException(e);
+        }
+        finally {
+            busyLock.readLock().unlock();
+        }
+    }
+
+    /**
+     * Closes client but does not wait.
+     *
+     * @param err Error.
+     */
+    private void close0(@Nullable Throwable err) {
+        busyLock.writeLock().lock();
+
+        try {
+            if (stopping)
+                return;
+
+            stopping = true;
+        }
+        finally {
+            busyLock.writeLock().unlock();
+        }
+
+        if (err == null)
+            err = new IgniteCheckedException("Failed to perform request (connection was concurrently closed before " +
+                "a response was received).");
+
+        // Clean up resources.
+        U.closeQuiet(out);
+
+        if (endpoint != null)
+            endpoint.close();
+
+        // Unwind futures. We can safely iterate here because no more futures will be added.
+        Iterator<HadoopIgfsFuture> it = reqMap.values().iterator();
+
+        while (it.hasNext()) {
+            HadoopIgfsFuture fut = it.next();
+
+            fut.onDone(err);
+
+            it.remove();
+        }
+
+        for (HadoopIgfsIpcIoListener lsnr : lsnrs)
+            lsnr.onClose();
+    }
+
+    /**
+     * Do not extend {@code GridThread} to minimize class dependencies.
+     */
+    private class ReaderThread extends Thread {
+        /** {@inheritDoc} */
+        @SuppressWarnings("unchecked")
+        @Override public void run() {
+            // Error to fail pending futures.
+            Throwable err = null;
+
+            try {
+                InputStream in = endpoint.inputStream();
+
+                IgfsDataInputStream dis = new IgfsDataInputStream(in);
+
+                byte[] hdr = new byte[IgfsMarshaller.HEADER_SIZE];
+                byte[] msgHdr = new byte[IgfsControlResponse.RES_HEADER_SIZE];
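+
+                // Response header layout (see IgfsControlResponse): byte 4 is the error flag,
+                // bytes 5..8 carry the block length for READ_BLOCK responses.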
+
+                while (!Thread.currentThread().isInterrupted()) {
+                    dis.readFully(hdr);
+
+                    long reqId = U.bytesToLong(hdr, 0);
+
+                    // We don't wait for write responses, therefore reqId is -1.
+                    if (reqId == -1) {
+                        // We received a response which normally should not be sent. It must contain an error.
+                        dis.readFully(msgHdr);
+
+                        assert msgHdr[4] != 0;
+
+                        String errMsg = dis.readUTF();
+
+                        // Error code.
+                        dis.readInt();
+
+                        long streamId = dis.readLong();
+
+                        for (HadoopIgfsIpcIoListener lsnr : lsnrs)
+                            lsnr.onError(streamId, errMsg);
+                    }
+                    else {
+                        HadoopIgfsFuture<Object> fut = reqMap.remove(reqId);
+
+                        if (fut == null) {
+                            String msg = "Failed to read response from server: response closure is unavailable for " +
+                                "requestId (will close connection):" + reqId;
+
+                            log.warn(msg);
+
+                            err = new IgniteCheckedException(msg);
+
+                            break;
+                        }
+                        else {
+                            try {
+                                IgfsIpcCommand cmd = IgfsIpcCommand.valueOf(U.bytesToInt(hdr, 8));
+
+                                if (log.isDebugEnabled())
+                                    log.debug("Received IGFS response [reqId=" + reqId + ", cmd=" + cmd + ']');
+
+                                Object res = null;
+
+                                if (fut.read()) {
+                                    dis.readFully(msgHdr);
+
+                                    boolean hasErr = msgHdr[4] != 0;
+
+                                    if (hasErr) {
+                                        String errMsg = dis.readUTF();
+
+                                        // Error code.
+                                        Integer errCode = dis.readInt();
+
+                                        IgfsControlResponse.throwError(errCode, errMsg);
+                                    }
+
+                                    int blockLen = U.bytesToInt(msgHdr, 5);
+
+                                    int readLen = Math.min(blockLen, fut.outputLength());
+
+                                    if (readLen > 0) {
+                                        assert fut.outputBuffer() != null;
+
+                                        dis.readFully(fut.outputBuffer(), fut.outputOffset(), readLen);
+                                    }
+
+                                    if (readLen != blockLen) {
+                                        byte[] buf = new byte[blockLen - readLen];
+
+                                        dis.readFully(buf);
+
+                                        res = buf;
+                                    }
+                                }
+                                else
+                                    res = marsh.unmarshall(cmd, hdr, dis);
+
+                                fut.onDone(res);
+                            }
+                            catch (IgfsException | IgniteCheckedException e) {
+                                if (log.isDebugEnabled())
+                                    log.debug("Failed to apply response closure (will fail request future): " +
+                                        e.getMessage());
+
+                                fut.onDone(e);
+
+                                err = e;
+                            }
+                            catch (Throwable t) {
+                                fut.onDone(t);
+
+                                throw t;
+                            }
+                        }
+                    }
+                }
+            }
+            catch (EOFException ignored) {
+                err = new IgniteCheckedException("Failed to read response from server (connection was closed by remote peer).");
+            }
+            catch (IOException e) {
+                if (!stopping)
+                    log.error("Failed to read data (connection will be closed)", e);
+
+                err = new HadoopIgfsCommunicationException(e);
+            }
+            catch (Throwable e) {
+                if (!stopping)
+                    log.error("Failed to obtain endpoint input stream (connection will be closed)", e);
+
+                err = e;
+
+                if (e instanceof Error)
+                    throw (Error)e;
+            }
+            finally {
+                close0(err);
+            }
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return getClass().getSimpleName() + " [endpointAddr=" + endpointAddr + ", activeCnt=" + activeCnt +
+            ", stopping=" + stopping + ']';
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIoListener.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIoListener.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIoListener.java
new file mode 100644
index 0000000..c26e896
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIoListener.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.igfs;
+
+/**
+ * Listens to the events of {@link HadoopIgfsIpcIo}.
+ */
+public interface HadoopIgfsIpcIoListener {
+    /**
+     * Callback invoked when the IO is being closed.
+     */
+    public void onClose();
+
+    /**
+     * Callback invoked when remote error occurs.
+     *
+     * @param streamId Stream ID.
+     * @param errMsg Error message.
+     */
+    public void onError(long streamId, String errMsg);
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsJclLogger.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsJclLogger.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsJclLogger.java
new file mode 100644
index 0000000..3a7f45b
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsJclLogger.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.igfs;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.internal.util.tostring.GridToStringInclude;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * JCL logger wrapper for Hadoop.
+ */
+public class HadoopIgfsJclLogger implements IgniteLogger {
+    /** JCL implementation proxy. */
+    @GridToStringInclude
+    private Log impl;
+
+    /**
+     * Constructor.
+     *
+     * @param impl JCL implementation to use.
+     */
+    HadoopIgfsJclLogger(Log impl) {
+        assert impl != null;
+
+        this.impl = impl;
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteLogger getLogger(Object ctgr) {
+        return new HadoopIgfsJclLogger(LogFactory.getLog(
+            ctgr instanceof Class ? ((Class)ctgr).getName() : String.valueOf(ctgr)));
+    }
+
+    /** {@inheritDoc} */
+    @Override public void trace(String msg) {
+        impl.trace(msg);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void debug(String msg) {
+        impl.debug(msg);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void info(String msg) {
+        impl.info(msg);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void warning(String msg) {
+        impl.warn(msg);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void warning(String msg, @Nullable Throwable e) {
+        impl.warn(msg, e);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void error(String msg) {
+        impl.error(msg);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean isQuiet() {
+        return !isInfoEnabled() && !isDebugEnabled();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void error(String msg, @Nullable Throwable e) {
+        impl.error(msg, e);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean isTraceEnabled() {
+        return impl.isTraceEnabled();
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean isDebugEnabled() {
+        return impl.isDebugEnabled();
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean isInfoEnabled() {
+        return impl.isInfoEnabled();
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public String fileName() {
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopIgfsJclLogger.class, this);
+    }
+}
\ No newline at end of file
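
A short usage sketch for the wrapper (hypothetical log category; the constructor is package-private, so this assumes code in the same package):

    import org.apache.commons.logging.LogFactory;
    import org.apache.ignite.IgniteLogger;

    class JclLoggerSketch {
        static void demo() {
            // Adapt a commons-logging Log to the IgniteLogger API.
            IgniteLogger log = new HadoopIgfsJclLogger(LogFactory.getLog("org.apache.ignite.igfs"));

            if (log.isDebugEnabled())
                log.debug("Debug output goes through JCL.");

            // getLogger() derives a logger for another category.
            IgniteLogger child = log.getLogger(JclLoggerSketch.class);

            child.info("Child logger ready.");
        }
    }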

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutProc.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutProc.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutProc.java
new file mode 100644
index 0000000..9902142
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutProc.java
@@ -0,0 +1,524 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.igfs;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+import org.apache.commons.logging.Log;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.igfs.IgfsBlockLocation;
+import org.apache.ignite.igfs.IgfsException;
+import org.apache.ignite.igfs.IgfsFile;
+import org.apache.ignite.igfs.IgfsPath;
+import org.apache.ignite.igfs.IgfsPathSummary;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.igfs.common.IgfsControlResponse;
+import org.apache.ignite.internal.igfs.common.IgfsHandshakeRequest;
+import org.apache.ignite.internal.igfs.common.IgfsMessage;
+import org.apache.ignite.internal.igfs.common.IgfsPathControlRequest;
+import org.apache.ignite.internal.igfs.common.IgfsStatusRequest;
+import org.apache.ignite.internal.igfs.common.IgfsStreamControlRequest;
+import org.apache.ignite.internal.processors.igfs.IgfsHandshakeResponse;
+import org.apache.ignite.internal.processors.igfs.IgfsInputStreamDescriptor;
+import org.apache.ignite.internal.processors.igfs.IgfsStatus;
+import org.apache.ignite.internal.processors.igfs.IgfsUtils;
+import org.apache.ignite.internal.util.future.GridFinishedFuture;
+import org.apache.ignite.internal.util.lang.GridClosureException;
+import org.apache.ignite.lang.IgniteClosure;
+import org.jetbrains.annotations.Nullable;
+import org.jsr166.ConcurrentHashMap8;
+
+import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.AFFINITY;
+import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.CLOSE;
+import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.DELETE;
+import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.INFO;
+import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.LIST_FILES;
+import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.LIST_PATHS;
+import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.MAKE_DIRECTORIES;
+import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.OPEN_APPEND;
+import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.OPEN_CREATE;
+import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.OPEN_READ;
+import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.PATH_SUMMARY;
+import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.READ_BLOCK;
+import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.RENAME;
+import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.SET_TIMES;
+import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.UPDATE;
+import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.WRITE_BLOCK;
+
+/**
+ * Communication with an external Ignite process over TCP or shared memory (shmem).
+ */
+public class HadoopIgfsOutProc implements HadoopIgfsEx, HadoopIgfsIpcIoListener {
+    /** Expected result is boolean. */
+    private static final IgniteClosure<IgniteInternalFuture<IgfsMessage>, Boolean> BOOL_RES = createClosure();
+
+    /** Expected result is boolean. */
+    private static final IgniteClosure<IgniteInternalFuture<IgfsMessage>, Long> LONG_RES = createClosure();
+
+    /** Expected result is {@code IgfsFile}. */
+    private static final IgniteClosure<IgniteInternalFuture<IgfsMessage>, IgfsFile> FILE_RES = createClosure();
+
+    /** Expected result is {@code IgfsHandshakeResponse}. */
+    private static final IgniteClosure<IgniteInternalFuture<IgfsMessage>,
+        IgfsHandshakeResponse> HANDSHAKE_RES = createClosure();
+
+    /** Expected result is {@code IgfsStatus}. */
+    private static final IgniteClosure<IgniteInternalFuture<IgfsMessage>, IgfsStatus> STATUS_RES =
+        createClosure();
+
+    /** Expected result is {@code IgfsInputStreamDescriptor}. */
+    private static final IgniteClosure<IgniteInternalFuture<IgfsMessage>,
+        IgfsInputStreamDescriptor> STREAM_DESCRIPTOR_RES = createClosure();
+
+    /** Expected result is a collection of {@code IgfsFile}. */
+    private static final IgniteClosure<IgniteInternalFuture<IgfsMessage>,
+        Collection<IgfsFile>> FILE_COL_RES = createClosure();
+
+    /** Expected result is a collection of {@code IgfsPath}. */
+    private static final IgniteClosure<IgniteInternalFuture<IgfsMessage>,
+        Collection<IgfsPath>> PATH_COL_RES = createClosure();
+
+    /** Expected result is {@code IgfsPathSummary}. */
+    private static final IgniteClosure<IgniteInternalFuture<IgfsMessage>, IgfsPathSummary> SUMMARY_RES =
+        createClosure();
+
+    /** Expected result is a collection of {@code IgfsBlockLocation}. */
+    private static final IgniteClosure<IgniteInternalFuture<IgfsMessage>,
+        Collection<IgfsBlockLocation>> BLOCK_LOCATION_COL_RES = createClosure();
+
+    /** Grid name. */
+    private final String grid;
+
+    /** IGFS name. */
+    private final String igfs;
+
+    /** The user on whose behalf this out-proc operates. */
+    private final String userName;
+
+    /** Client log. */
+    private final Log log;
+
+    /** Client IO. */
+    private final HadoopIgfsIpcIo io;
+
+    /** Event listeners. */
+    private final Map<Long, HadoopIgfsStreamEventListener> lsnrs = new ConcurrentHashMap8<>();
+
+    /**
+     * Constructor for TCP endpoint.
+     *
+     * @param host Host.
+     * @param port Port.
+     * @param grid Grid name.
+     * @param igfs IGFS name.
+     * @param log Client logger.
+     * @param user User name to act on behalf of.
+     * @throws IOException If failed.
+     */
+    public HadoopIgfsOutProc(String host, int port, String grid, String igfs, Log log, String user) throws IOException {
+        this(host, port, grid, igfs, false, log, user);
+    }
+
+    /**
+     * Constructor for shmem endpoint.
+     *
+     * @param port Port.
+     * @param grid Grid name.
+     * @param igfs IGFS name.
+     * @param log Client logger.
+     * @param user User name to act on behalf of.
+     * @throws IOException If failed.
+     */
+    public HadoopIgfsOutProc(int port, String grid, String igfs, Log log, String user) throws IOException {
+        this(null, port, grid, igfs, true, log, user);
+    }
+
+    /**
+     * Constructor.
+     *
+     * @param host Host.
+     * @param port Port.
+     * @param grid Grid name.
+     * @param igfs IGFS name.
+     * @param shmem Shared memory flag.
+     * @param log Client logger.
+     * @param user User name to act on behalf of.
+     * @throws IOException If failed.
+     */
+    private HadoopIgfsOutProc(String host, int port, String grid, String igfs, boolean shmem, Log log, String user)
+        throws IOException {
+        assert host != null && !shmem || host == null && shmem :
+            "Invalid arguments [host=" + host + ", port=" + port + ", shmem=" + shmem + ']';
+
+        String endpoint = host != null ? host + ":" + port : "shmem:" + port;
+
+        this.grid = grid;
+        this.igfs = igfs;
+        this.log = log;
+        this.userName = IgfsUtils.fixUserName(user);
+
+        io = HadoopIgfsIpcIo.get(log, endpoint);
+
+        io.addEventListener(this);
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgfsHandshakeResponse handshake(String logDir) throws IgniteCheckedException {
+        final IgfsHandshakeRequest req = new IgfsHandshakeRequest();
+
+        req.gridName(grid);
+        req.igfsName(igfs);
+        req.logDirectory(logDir);
+
+        return io.send(req).chain(HANDSHAKE_RES).get();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void close(boolean force) {
+        assert io != null;
+
+        io.removeEventListener(this);
+
+        if (force)
+            io.forceClose();
+        else
+            io.release();
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgfsFile info(IgfsPath path) throws IgniteCheckedException {
+        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
+
+        msg.command(INFO);
+        msg.path(path);
+        msg.userName(userName);
+
+        return io.send(msg).chain(FILE_RES).get();
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgfsFile update(IgfsPath path, Map<String, String> props) throws IgniteCheckedException {
+        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
+
+        msg.command(UPDATE);
+        msg.path(path);
+        msg.properties(props);
+        msg.userName(userName);
+
+        return io.send(msg).chain(FILE_RES).get();
+    }
+
+    /** {@inheritDoc} */
+    @Override public Boolean setTimes(IgfsPath path, long accessTime, long modificationTime) throws IgniteCheckedException {
+        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
+
+        msg.command(SET_TIMES);
+        msg.path(path);
+        msg.accessTime(accessTime);
+        msg.modificationTime(modificationTime);
+        msg.userName(userName);
+
+        return io.send(msg).chain(BOOL_RES).get();
+    }
+
+    /** {@inheritDoc} */
+    @Override public Boolean rename(IgfsPath src, IgfsPath dest) throws IgniteCheckedException {
+        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
+
+        msg.command(RENAME);
+        msg.path(src);
+        msg.destinationPath(dest);
+        msg.userName(userName);
+
+        return io.send(msg).chain(BOOL_RES).get();
+    }
+
+    /** {@inheritDoc} */
+    @Override public Boolean delete(IgfsPath path, boolean recursive) throws IgniteCheckedException {
+        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
+
+        msg.command(DELETE);
+        msg.path(path);
+        msg.flag(recursive);
+        msg.userName(userName);
+
+        return io.send(msg).chain(BOOL_RES).get();
+    }
+
+    /** {@inheritDoc} */
+    @Override public Collection<IgfsBlockLocation> affinity(IgfsPath path, long start, long len)
+        throws IgniteCheckedException {
+        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
+
+        msg.command(AFFINITY);
+        msg.path(path);
+        msg.start(start);
+        msg.length(len);
+        msg.userName(userName);
+
+        return io.send(msg).chain(BLOCK_LOCATION_COL_RES).get();
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgfsPathSummary contentSummary(IgfsPath path) throws IgniteCheckedException {
+        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
+
+        msg.command(PATH_SUMMARY);
+        msg.path(path);
+        msg.userName(userName);
+
+        return io.send(msg).chain(SUMMARY_RES).get();
+    }
+
+    /** {@inheritDoc} */
+    @Override public Boolean mkdirs(IgfsPath path, Map<String, String> props) throws IgniteCheckedException {
+        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
+
+        msg.command(MAKE_DIRECTORIES);
+        msg.path(path);
+        msg.properties(props);
+        msg.userName(userName);
+
+        return io.send(msg).chain(BOOL_RES).get();
+    }
+
+    /** {@inheritDoc} */
+    @Override public Collection<IgfsFile> listFiles(IgfsPath path) throws IgniteCheckedException {
+        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
+
+        msg.command(LIST_FILES);
+        msg.path(path);
+        msg.userName(userName);
+
+        return io.send(msg).chain(FILE_COL_RES).get();
+    }
+
+    /** {@inheritDoc} */
+    @Override public Collection<IgfsPath> listPaths(IgfsPath path) throws IgniteCheckedException {
+        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
+
+        msg.command(LIST_PATHS);
+        msg.path(path);
+        msg.userName(userName);
+
+        return io.send(msg).chain(PATH_COL_RES).get();
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgfsStatus fsStatus() throws IgniteCheckedException {
+        return io.send(new IgfsStatusRequest()).chain(STATUS_RES).get();
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopIgfsStreamDelegate open(IgfsPath path) throws IgniteCheckedException {
+        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
+
+        msg.command(OPEN_READ);
+        msg.path(path);
+        msg.flag(false);
+        msg.userName(userName);
+
+        IgfsInputStreamDescriptor rmtDesc = io.send(msg).chain(STREAM_DESCRIPTOR_RES).get();
+
+        return new HadoopIgfsStreamDelegate(this, rmtDesc.streamId(), rmtDesc.length());
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopIgfsStreamDelegate open(IgfsPath path,
+        int seqReadsBeforePrefetch) throws IgniteCheckedException {
+        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
+
+        msg.command(OPEN_READ);
+        msg.path(path);
+        msg.flag(true);
+        msg.sequentialReadsBeforePrefetch(seqReadsBeforePrefetch);
+        msg.userName(userName);
+
+        IgfsInputStreamDescriptor rmtDesc = io.send(msg).chain(STREAM_DESCRIPTOR_RES).get();
+
+        return new HadoopIgfsStreamDelegate(this, rmtDesc.streamId(), rmtDesc.length());
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopIgfsStreamDelegate create(IgfsPath path, boolean overwrite, boolean colocate,
+        int replication, long blockSize, @Nullable Map<String, String> props) throws IgniteCheckedException {
+        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
+
+        msg.command(OPEN_CREATE);
+        msg.path(path);
+        msg.flag(overwrite);
+        msg.colocate(colocate);
+        msg.properties(props);
+        msg.replication(replication);
+        msg.blockSize(blockSize);
+        msg.userName(userName);
+
+        Long streamId = io.send(msg).chain(LONG_RES).get();
+
+        return new HadoopIgfsStreamDelegate(this, streamId);
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopIgfsStreamDelegate append(IgfsPath path, boolean create,
+        @Nullable Map<String, String> props) throws IgniteCheckedException {
+        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
+
+        msg.command(OPEN_APPEND);
+        msg.path(path);
+        msg.flag(create);
+        msg.properties(props);
+        msg.userName(userName);
+
+        Long streamId = io.send(msg).chain(LONG_RES).get();
+
+        return new HadoopIgfsStreamDelegate(this, streamId);
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteInternalFuture<byte[]> readData(HadoopIgfsStreamDelegate desc, long pos, int len,
+        final @Nullable byte[] outBuf, final int outOff, final int outLen) {
+        assert len > 0;
+
+        final IgfsStreamControlRequest msg = new IgfsStreamControlRequest();
+
+        msg.command(READ_BLOCK);
+        msg.streamId((long)desc.target());
+        msg.position(pos);
+        msg.length(len);
+
+        try {
+            return io.send(msg, outBuf, outOff, outLen);
+        }
+        catch (IgniteCheckedException e) {
+            return new GridFinishedFuture<>(e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeData(HadoopIgfsStreamDelegate desc, byte[] data, int off, int len)
+        throws IOException {
+        final IgfsStreamControlRequest msg = new IgfsStreamControlRequest();
+
+        msg.command(WRITE_BLOCK);
+        msg.streamId((long)desc.target());
+        msg.data(data);
+        msg.position(off);
+        msg.length(len);
+
+        try {
+            io.sendPlain(msg);
+        }
+        catch (IgniteCheckedException e) {
+            throw HadoopIgfsUtils.cast(e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void flush(HadoopIgfsStreamDelegate delegate) throws IOException {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public void closeStream(HadoopIgfsStreamDelegate desc) throws IOException {
+        final IgfsStreamControlRequest msg = new IgfsStreamControlRequest();
+
+        msg.command(CLOSE);
+        msg.streamId((long)desc.target());
+
+        try {
+            io.send(msg).chain(BOOL_RES).get();
+        }
+        catch (IgniteCheckedException e) {
+            throw HadoopIgfsUtils.cast(e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void addEventListener(HadoopIgfsStreamDelegate desc,
+        HadoopIgfsStreamEventListener lsnr) {
+        long streamId = desc.target();
+
+        HadoopIgfsStreamEventListener lsnr0 = lsnrs.put(streamId, lsnr);
+
+        assert lsnr0 == null || lsnr0 == lsnr;
+
+        if (log.isDebugEnabled())
+            log.debug("Added stream event listener [streamId=" + streamId + ']');
+    }
+
+    /** {@inheritDoc} */
+    @Override public void removeEventListener(HadoopIgfsStreamDelegate desc) {
+        long streamId = desc.target();
+
+        HadoopIgfsStreamEventListener lsnr0 = lsnrs.remove(streamId);
+
+        if (lsnr0 != null && log.isDebugEnabled())
+            log.debug("Removed stream event listener [streamId=" + streamId + ']');
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onClose() {
+        for (HadoopIgfsStreamEventListener lsnr : lsnrs.values()) {
+            try {
+                lsnr.onClose();
+            }
+            catch (IgniteCheckedException e) {
+                log.warn("Got exception from stream event listener (will ignore): " + lsnr, e);
+            }
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onError(long streamId, String errMsg) {
+        HadoopIgfsStreamEventListener lsnr = lsnrs.get(streamId);
+
+        if (lsnr != null)
+            lsnr.onError(errMsg);
+        else
+            log.warn("Received write error response for not registered output stream (will ignore) " +
+                "[streamId= " + streamId + ']');
+    }
+
+    /**
+     * Creates conversion closure for given type.
+     *
+     * @param <T> Type of expected result.
+     * @return Conversion closure.
+     */
+    @SuppressWarnings("unchecked")
+    private static <T> IgniteClosure<IgniteInternalFuture<IgfsMessage>, T> createClosure() {
+        return new IgniteClosure<IgniteInternalFuture<IgfsMessage>, T>() {
+            @Override public T apply(IgniteInternalFuture<IgfsMessage> fut) {
+                try {
+                    IgfsControlResponse res = (IgfsControlResponse)fut.get();
+
+                    if (res.hasError())
+                        res.throwError();
+
+                    return (T)res.response();
+                }
+                catch (IgfsException | IgniteCheckedException e) {
+                    throw new GridClosureException(e);
+                }
+            }
+        };
+    }
+
+    /** {@inheritDoc} */
+    @Override public String user() {
+        return userName;
+    }
+}
\ No newline at end of file
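
Every metadata operation above follows one pattern: populate a request message, send it through the shared IPC IO, and chain a typed closure that unwraps IgfsControlResponse (re-throwing any server-side error). A hypothetical client-side sketch; host, port, grid/IGFS names and paths are illustrative:

    import java.io.IOException;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.ignite.IgniteCheckedException;
    import org.apache.ignite.igfs.IgfsPath;
    import org.apache.ignite.internal.processors.igfs.IgfsHandshakeResponse;

    class OutProcSketch {
        static void run() throws IOException, IgniteCheckedException {
            Log log = LogFactory.getLog("igfs-client");

            // TCP flavour; the shmem constructor takes only a port.
            HadoopIgfsOutProc proc =
                new HadoopIgfsOutProc("127.0.0.1", 10500, "myGrid", "myIgfs", log, "hadoop-user");

            try {
                // Handshake first; the argument is the client-side log directory.
                IgfsHandshakeResponse hs = proc.handshake("/tmp/igfs-log");

                // A typical boolean-result command (null props assumed acceptable here).
                Boolean created = proc.mkdirs(new IgfsPath("/sketch/dir"), null);
            }
            finally {
                proc.close(false); // Release (not force-close) the shared IO.
            }
        }
    }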

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutputStream.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutputStream.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutputStream.java
new file mode 100644
index 0000000..8f7458b
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutputStream.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.igfs;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import org.apache.commons.logging.Log;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.igfs.common.IgfsLogger;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ * IGFS Hadoop output stream implementation.
+ */
+public class HadoopIgfsOutputStream extends OutputStream implements HadoopIgfsStreamEventListener {
+    /** Log instance. */
+    private Log log;
+
+    /** Client logger. */
+    private IgfsLogger clientLog;
+
+    /** Log stream ID. */
+    private long logStreamId;
+
+    /** Server stream delegate. */
+    private HadoopIgfsStreamDelegate delegate;
+
+    /** Closed flag. */
+    private volatile boolean closed;
+
+    /** Flag set if stream was closed due to connection breakage. */
+    private boolean connBroken;
+
+    /** Error message. */
+    private volatile String errMsg;
+
+    /** Write time. */
+    private long writeTime;
+
+    /** User time. */
+    private long userTime;
+
+    /** Last timestamp. */
+    private long lastTs;
+
+    /** Amount of written bytes. */
+    private long total;
+
+    /**
+     * Creates a lightweight output stream.
+     *
+     * @param delegate Server stream delegate.
+     * @param log Logger to use.
+     * @param clientLog Client logger.
+     * @param logStreamId Log stream ID.
+     */
+    public HadoopIgfsOutputStream(HadoopIgfsStreamDelegate delegate, Log log,
+        IgfsLogger clientLog, long logStreamId) {
+        this.delegate = delegate;
+        this.log = log;
+        this.clientLog = clientLog;
+        this.logStreamId = logStreamId;
+
+        lastTs = System.nanoTime();
+
+        delegate.hadoop().addEventListener(delegate, this);
+    }
+
+    /**
+     * Write start.
+     */
+    private void writeStart() {
+        long now = System.nanoTime();
+
+        userTime += now - lastTs;
+
+        lastTs = now;
+    }
+
+    /**
+     * Write end.
+     */
+    private void writeEnd() {
+        long now = System.nanoTime();
+
+        writeTime += now - lastTs;
+
+        lastTs = now;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void write(@NotNull byte[] b, int off, int len) throws IOException {
+        check();
+
+        writeStart();
+
+        try {
+            delegate.hadoop().writeData(delegate, b, off, len);
+
+            total += len;
+        }
+        finally {
+            writeEnd();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void write(int b) throws IOException {
+        // Delegates to write(byte[], int, int), which already updates 'total'.
+        write(new byte[] {(byte)b});
+    }
+
+    /** {@inheritDoc} */
+    @Override public void flush() throws IOException {
+        delegate.hadoop().flush(delegate);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void close() throws IOException {
+        if (!closed) {
+            if (log.isDebugEnabled())
+                log.debug("Closing output stream: " + delegate);
+
+            writeStart();
+
+            delegate.hadoop().closeStream(delegate);
+
+            markClosed(false);
+
+            writeEnd();
+
+            if (clientLog.isLogEnabled())
+                clientLog.logCloseOut(logStreamId, userTime, writeTime, total);
+
+            if (log.isDebugEnabled())
+                log.debug("Closed output stream [delegate=" + delegate + ", writeTime=" + writeTime / 1000 +
+                    ", userTime=" + userTime / 1000 + ']');
+        }
+        else if (connBroken)
+            throw new IOException(
+                "Failed to close stream because connection was broken (data could have been lost).");
+    }
+
+    /**
+     * Marks stream as closed.
+     *
+     * @param connBroken {@code True} if connection with server was lost.
+     */
+    private void markClosed(boolean connBroken) {
+        // It is ok to have race here.
+        if (!closed) {
+            closed = true;
+
+            delegate.hadoop().removeEventListener(delegate);
+
+            this.connBroken = connBroken;
+        }
+    }
+
+    /**
+     * @throws IOException If check failed.
+     */
+    private void check() throws IOException {
+        String errMsg0 = errMsg;
+
+        if (errMsg0 != null)
+            throw new IOException(errMsg0);
+
+        if (closed) {
+            if (connBroken)
+                throw new IOException("Server connection was lost.");
+            else
+                throw new IOException("Stream is closed.");
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onClose() throws IgniteCheckedException {
+        markClosed(true);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onError(String errMsg) {
+        this.errMsg = errMsg;
+    }
+}
\ No newline at end of file
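
The writeStart()/writeEnd() pair above implements a simple two-bucket wall-clock split: every interval since the last checkpoint is charged either to the caller ("user time" accumulated before an operation starts) or to the stream operation itself. A standalone sketch of the same accounting pattern:

    /** Two-bucket wall-clock accounting, mirroring the streams in this commit. */
    class OpTimer {
        /** Time spent inside operations, in nanoseconds. */
        private long opTime;

        /** Time spent in caller code between operations, in nanoseconds. */
        private long userTime;

        /** Last checkpoint. */
        private long lastTs = System.nanoTime();

        /** Charge the interval since the last checkpoint to the caller. */
        void opStart() {
            long now = System.nanoTime();

            userTime += now - lastTs;

            lastTs = now;
        }

        /** Charge the interval since opStart() to the operation. */
        void opEnd() {
            long now = System.nanoTime();

            opTime += now - lastTs;

            lastTs = now;
        }
    }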

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProperties.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProperties.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProperties.java
new file mode 100644
index 0000000..90f6bca
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProperties.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.igfs;
+
+import java.util.Map;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.internal.processors.igfs.IgfsUtils;
+
+/**
+ * Hadoop file system properties.
+ */
+public class HadoopIgfsProperties {
+    /** Username. */
+    private String usrName;
+
+    /** Group name. */
+    private String grpName;
+
+    /** Permissions. */
+    private FsPermission perm;
+
+    /**
+     * Constructor.
+     *
+     * @param props Properties.
+     * @throws IgniteException In case of error.
+     */
+    public HadoopIgfsProperties(Map<String, String> props) throws IgniteException {
+        usrName = props.get(IgfsUtils.PROP_USER_NAME);
+        grpName = props.get(IgfsUtils.PROP_GROUP_NAME);
+
+        String permStr = props.get(IgfsUtils.PROP_PERMISSION);
+
+        if (permStr != null) {
+            try {
+                perm = new FsPermission((short)Integer.parseInt(permStr, 8));
+            }
+            catch (NumberFormatException ignore) {
+                throw new IgniteException("Permissions cannot be parsed: " + permStr);
+            }
+        }
+    }
+
+    /**
+     * Get user name.
+     *
+     * @return User name.
+     */
+    public String userName() {
+        return usrName;
+    }
+
+    /**
+     * Get group name.
+     *
+     * @return Group name.
+     */
+    public String groupName() {
+        return grpName;
+    }
+
+    /**
+     * Get permission.
+     *
+     * @return Permission.
+     */
+    public FsPermission permission() {
+        return perm;
+    }
+}
\ No newline at end of file
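
The permission string is parsed as an octal Unix mode. A small parsing sketch (the property keys come from IgfsUtils; the values are illustrative):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.ignite.internal.processors.igfs.IgfsUtils;

    class PropsSketch {
        static void parse() {
            Map<String, String> props = new HashMap<>();

            props.put(IgfsUtils.PROP_USER_NAME, "hadoop");
            props.put(IgfsUtils.PROP_GROUP_NAME, "users");
            props.put(IgfsUtils.PROP_PERMISSION, "0755"); // Octal rwxr-xr-x.

            HadoopIgfsProperties p = new HadoopIgfsProperties(props);

            // Integer.parseInt("0755", 8) == 493, i.e. rwxr-xr-x.
            assert p.permission().toShort() == 493;
        }
    }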

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyInputStream.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyInputStream.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyInputStream.java
new file mode 100644
index 0000000..5cee947
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyInputStream.java
@@ -0,0 +1,337 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.igfs;
+
+import java.io.IOException;
+import java.io.InputStream;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.Seekable;
+import org.apache.ignite.internal.igfs.common.IgfsLogger;
+
+/**
+ * Secondary Hadoop file system input stream wrapper.
+ */
+public class HadoopIgfsProxyInputStream extends InputStream implements Seekable, PositionedReadable {
+    /** Actual input stream to the secondary file system. */
+    private final FSDataInputStream is;
+
+    /** Client logger. */
+    private final IgfsLogger clientLog;
+
+    /** Log stream ID. */
+    private final long logStreamId;
+
+    /** Read time. */
+    private long readTime;
+
+    /** User time. */
+    private long userTime;
+
+    /** Last timestamp. */
+    private long lastTs;
+
+    /** Amount of read bytes. */
+    private long total;
+
+    /** Closed flag. */
+    private boolean closed;
+
+    /**
+     * Constructor.
+     *
+     * @param is Actual input stream to the secondary file system.
+     * @param clientLog Client log.
+     * @param logStreamId Log stream ID.
+     */
+    public HadoopIgfsProxyInputStream(FSDataInputStream is, IgfsLogger clientLog, long logStreamId) {
+        assert is != null;
+        assert clientLog != null;
+
+        this.is = is;
+        this.clientLog = clientLog;
+        this.logStreamId = logStreamId;
+
+        lastTs = System.nanoTime();
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized int read(byte[] b) throws IOException {
+        readStart();
+
+        int res;
+
+        try {
+            res = is.read(b);
+        }
+        finally {
+            readEnd();
+        }
+
+        if (res != -1)
+            total += res;
+
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized int read(byte[] b, int off, int len) throws IOException {
+        readStart();
+
+        int res;
+
+        try {
+            // Read directly from the wrapped stream; going through super.read() would
+            // loop over the overridden read() and double-count bytes and timing.
+            res = is.read(b, off, len);
+        }
+        finally {
+            readEnd();
+        }
+
+        if (res != -1)
+            total += res;
+
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized long skip(long n) throws IOException {
+        readStart();
+
+        long res;
+
+        try {
+            res = is.skip(n);
+        }
+        finally {
+            readEnd();
+        }
+
+        if (clientLog.isLogEnabled())
+            clientLog.logSkip(logStreamId, res);
+
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized int available() throws IOException {
+        readStart();
+
+        try {
+            return is.available();
+        }
+        finally {
+            readEnd();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized void close() throws IOException {
+        if (!closed) {
+            closed = true;
+
+            readStart();
+
+            try {
+                is.close();
+            }
+            finally {
+                readEnd();
+            }
+
+            if (clientLog.isLogEnabled())
+                clientLog.logCloseIn(logStreamId, userTime, readTime, total);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized void mark(int readLimit) {
+        readStart();
+
+        try {
+            is.mark(readLimit);
+        }
+        finally {
+            readEnd();
+        }
+
+        if (clientLog.isLogEnabled())
+            clientLog.logMark(logStreamId, readLimit);
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized void reset() throws IOException {
+        readStart();
+
+        try {
+            is.reset();
+        }
+        finally {
+            readEnd();
+        }
+
+        if (clientLog.isLogEnabled())
+            clientLog.logReset(logStreamId);
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized boolean markSupported() {
+        readStart();
+
+        try {
+            return is.markSupported();
+        }
+        finally {
+            readEnd();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized int read() throws IOException {
+        readStart();
+
+        int res;
+
+        try {
+            res = is.read();
+        }
+        finally {
+            readEnd();
+        }
+
+        if (res != -1)
+            total++;
+
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized int read(long pos, byte[] buf, int off, int len) throws IOException {
+        readStart();
+
+        int res;
+
+        try {
+            res = is.read(pos, buf, off, len);
+        }
+        finally {
+            readEnd();
+        }
+
+        if (res != -1)
+            total += res;
+
+        if (clientLog.isLogEnabled())
+            clientLog.logRandomRead(logStreamId, pos, res);
+
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized void readFully(long pos, byte[] buf, int off, int len) throws IOException {
+        readStart();
+
+        try {
+            is.readFully(pos, buf, off, len);
+        }
+        finally {
+            readEnd();
+        }
+
+        total += len;
+
+        if (clientLog.isLogEnabled())
+            clientLog.logRandomRead(logStreamId, pos, len);
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized void readFully(long pos, byte[] buf) throws IOException {
+        readStart();
+
+        try {
+            is.readFully(pos, buf);
+        }
+        finally {
+            readEnd();
+        }
+
+        total += buf.length;
+
+        if (clientLog.isLogEnabled())
+            clientLog.logRandomRead(logStreamId, pos, buf.length);
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized void seek(long pos) throws IOException {
+        readStart();
+
+        try {
+            is.seek(pos);
+        }
+        finally {
+            readEnd();
+        }
+
+        if (clientLog.isLogEnabled())
+            clientLog.logSeek(logStreamId, pos);
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized long getPos() throws IOException {
+        readStart();
+
+        try {
+            return is.getPos();
+        }
+        finally {
+            readEnd();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized boolean seekToNewSource(long targetPos) throws IOException {
+        readStart();
+
+        try {
+            return is.seekToNewSource(targetPos);
+        }
+        finally {
+            readEnd();
+        }
+    }
+
+    /**
+     * Read start.
+     */
+    private void readStart() {
+        long now = System.nanoTime();
+
+        userTime += now - lastTs;
+
+        lastTs = now;
+    }
+
+    /**
+     * Read end.
+     */
+    private void readEnd() {
+        long now = System.nanoTime();
+
+        readTime += now - lastTs;
+
+        lastTs = now;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyOutputStream.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyOutputStream.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyOutputStream.java
new file mode 100644
index 0000000..eade0f0
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyOutputStream.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.igfs;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.ignite.internal.igfs.common.IgfsLogger;
+
+/**
+ * Secondary Hadoop file system output stream wrapper.
+ */
+public class HadoopIgfsProxyOutputStream extends OutputStream {
+    /** Actual output stream. */
+    private FSDataOutputStream os;
+
+    /** Client logger. */
+    private final IgfsLogger clientLog;
+
+    /** Log stream ID. */
+    private final long logStreamId;
+
+    /** Write time. */
+    private long writeTime;
+
+    /** User time. */
+    private long userTime;
+
+    /** Last timestamp. */
+    private long lastTs;
+
+    /** Amount of written bytes. */
+    private long total;
+
+    /** Closed flag. */
+    private boolean closed;
+
+    /**
+     * Constructor.
+     *
+     * @param os Actual output stream.
+     * @param clientLog Client logger.
+     * @param logStreamId Log stream ID.
+     */
+    public HadoopIgfsProxyOutputStream(FSDataOutputStream os, IgfsLogger clientLog, long logStreamId) {
+        assert os != null;
+        assert clientLog != null;
+
+        this.os = os;
+        this.clientLog = clientLog;
+        this.logStreamId = logStreamId;
+
+        lastTs = System.nanoTime();
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized void write(int b) throws IOException {
+        writeStart();
+
+        try {
+            os.write(b);
+        }
+        finally {
+            writeEnd();
+        }
+
+        total++;
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized void write(byte[] b) throws IOException {
+        writeStart();
+
+        try {
+            os.write(b);
+        }
+        finally {
+            writeEnd();
+        }
+
+        total += b.length;
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized void write(byte[] b, int off, int len) throws IOException {
+        writeStart();
+
+        try {
+            os.write(b, off, len);
+        }
+        finally {
+            writeEnd();
+        }
+
+        total += len;
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized void flush() throws IOException {
+        writeStart();
+
+        try {
+            os.flush();
+        }
+        finally {
+            writeEnd();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized void close() throws IOException {
+        if (!closed) {
+            closed = true;
+
+            writeStart();
+
+            try {
+                os.close();
+            }
+            finally {
+                writeEnd();
+            }
+
+            if (clientLog.isLogEnabled())
+                clientLog.logCloseOut(logStreamId, userTime, writeTime, total);
+        }
+    }
+
+    /**
+     * Write start.
+     */
+    private void writeStart() {
+        long now = System.nanoTime();
+
+        userTime += now - lastTs;
+
+        lastTs = now;
+    }
+
+    /**
+     * Write end.
+     */
+    private void writeEnd() {
+        long now = System.nanoTime();
+
+        writeTime += now - lastTs;
+
+        lastTs = now;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsSecondaryFileSystemPositionedReadable.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsSecondaryFileSystemPositionedReadable.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsSecondaryFileSystemPositionedReadable.java
new file mode 100644
index 0000000..a0577ce
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsSecondaryFileSystemPositionedReadable.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.igfs;
+
+import java.io.IOException;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystemPositionedReadable;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+/**
+ * Secondary file system input stream wrapper which opens the underlying input stream lazily, only when it is
+ * explicitly requested.
+ * <p>
+ * The class is expected to be used only from a synchronized context and is therefore not thread-safe.
+ */
+public class HadoopIgfsSecondaryFileSystemPositionedReadable implements IgfsSecondaryFileSystemPositionedReadable {
+    /** Secondary file system. */
+    private final FileSystem fs;
+
+    /** Path to the file to open. */
+    private final Path path;
+
+    /** Buffer size. */
+    private final int bufSize;
+
+    /** Actual input stream. */
+    private FSDataInputStream in;
+
+    /** Cached error that occurred during input stream open. */
+    private IOException err;
+
+    /** Flag indicating that the stream was already opened. */
+    private boolean opened;
+
+    /**
+     * Constructor.
+     *
+     * @param fs Secondary file system.
+     * @param path Path to the file to open.
+     * @param bufSize Buffer size.
+     */
+    public HadoopIgfsSecondaryFileSystemPositionedReadable(FileSystem fs, Path path, int bufSize) {
+        assert fs != null;
+        assert path != null;
+
+        this.fs = fs;
+        this.path = path;
+        this.bufSize = bufSize;
+    }
+
+    /**
+     * Get the underlying input stream, opening it on first access.
+     *
+     * @return Input stream.
+     * @throws IOException If the stream could not be opened; the failure is cached and re-thrown on later calls.
+     */
+    private PositionedReadable in() throws IOException {
+        if (opened) {
+            if (err != null)
+                throw err;
+        }
+        else {
+            opened = true;
+
+            try {
+                in = fs.open(path, bufSize);
+
+                if (in == null)
+                    throw new IOException("Failed to open input stream (file system returned null): " + path);
+            }
+            catch (IOException e) {
+                err = e;
+
+                throw err;
+            }
+        }
+
+        return in;
+    }
+
+    /**
+     * Close the wrapped input stream if it was previously opened.
+     */
+    @Override public void close() {
+        U.closeQuiet(in);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int read(long pos, byte[] buf, int off, int len) throws IOException {
+        return in().read(pos, buf, off, len);
+    }
+}
\ No newline at end of file
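
A usage sketch for the lazy-open wrapper (file system, path and buffer size are illustrative): the underlying stream is opened on the first read(), and an open failure is cached and re-thrown on every later call:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class LazyReadableSketch {
        static void read() throws IOException {
            FileSystem fs = FileSystem.get(new Configuration());

            HadoopIgfsSecondaryFileSystemPositionedReadable rdr =
                new HadoopIgfsSecondaryFileSystemPositionedReadable(fs, new Path("/data/file.bin"), 4096);

            try {
                byte[] buf = new byte[1024];

                // First call opens the underlying FSDataInputStream.
                int read = rdr.read(0, buf, 0, buf.length);
            }
            finally {
                rdr.close(); // Quiet no-op if the stream was never opened.
            }
        }
    }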

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamDelegate.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamDelegate.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamDelegate.java
new file mode 100644
index 0000000..37b58ab
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamDelegate.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.igfs;
+
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+/**
+ * IGFS Hadoop stream descriptor.
+ */
+public class HadoopIgfsStreamDelegate {
+    /** RPC handler. */
+    private final HadoopIgfsEx hadoop;
+
+    /** Target. */
+    private final Object target;
+
+    /** Optional stream length. */
+    private final long len;
+
+    /**
+     * Constructor.
+     *
+     * @param hadoop RPC handler.
+     * @param target Target.
+     */
+    public HadoopIgfsStreamDelegate(HadoopIgfsEx hadoop, Object target) {
+        this(hadoop, target, -1);
+    }
+
+    /**
+     * Constructor.
+     *
+     * @param hadoop RPC handler.
+     * @param target Target.
+     * @param len Optional length.
+     */
+    public HadoopIgfsStreamDelegate(HadoopIgfsEx hadoop, Object target, long len) {
+        assert hadoop != null;
+        assert target != null;
+
+        this.hadoop = hadoop;
+        this.target = target;
+        this.len = len;
+    }
+
+    /**
+     * @return RPC handler.
+     */
+    public HadoopIgfsEx hadoop() {
+        return hadoop;
+    }
+
+    /**
+     * @return Stream target.
+     */
+    @SuppressWarnings("unchecked")
+    public <T> T target() {
+        return (T) target;
+    }
+
+    /**
+     * @return Length.
+     */
+    public long length() {
+        return len;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        return System.identityHashCode(target);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object obj) {
+        return obj != null && obj instanceof HadoopIgfsStreamDelegate &&
+            target == ((HadoopIgfsStreamDelegate)obj).target;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopIgfsStreamDelegate.class, this);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamEventListener.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamEventListener.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamEventListener.java
new file mode 100644
index 0000000..d81f765
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamEventListener.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.igfs;
+
+import org.apache.ignite.IgniteCheckedException;
+
+/**
+ * IGFS input stream event listener.
+ */
+public interface HadoopIgfsStreamEventListener {
+    /**
+     * Callback invoked when the stream is being closed.
+     *
+     * @throws IgniteCheckedException If failed.
+     */
+    public void onClose() throws IgniteCheckedException;
+
+    /**
+     * Callback invoked when a remote error occurs.
+     *
+     * @param errMsg Error message.
+     */
+    public void onError(String errMsg);
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsUtils.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsUtils.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsUtils.java
new file mode 100644
index 0000000..fa5cbc5
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsUtils.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.igfs;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.AbstractFileSystem;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.ParentNotDirectoryException;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathExistsException;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.igfs.IgfsDirectoryNotEmptyException;
+import org.apache.ignite.igfs.IgfsParentNotDirectoryException;
+import org.apache.ignite.igfs.IgfsPathAlreadyExistsException;
+import org.apache.ignite.igfs.IgfsPathNotFoundException;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Utility constants and methods for IGFS Hadoop file system.
+ */
+public class HadoopIgfsUtils {
+    /** Parameter name for the flag that disables embedded mode for an endpoint. */
+    public static final String PARAM_IGFS_ENDPOINT_NO_EMBED = "fs.igfs.%s.endpoint.no_embed";
+
+    /** Parameter name for the flag that disables local shared memory communication for an endpoint. */
+    public static final String PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM = "fs.igfs.%s.endpoint.no_local_shmem";
+
+    /** Parameter name for the flag that disables local TCP communication for an endpoint. */
+    public static final String PARAM_IGFS_ENDPOINT_NO_LOCAL_TCP = "fs.igfs.%s.endpoint.no_local_tcp";
+
+    /**
+     * Get string parameter.
+     *
+     * @param cfg Configuration.
+     * @param name Parameter name.
+     * @param authority Authority.
+     * @param dflt Default value.
+     * @return String value.
+     */
+    public static String parameter(Configuration cfg, String name, String authority, String dflt) {
+        return cfg.get(String.format(name, authority != null ? authority : ""), dflt);
+    }
+
+    /**
+     * Get integer parameter.
+     *
+     * @param cfg Configuration.
+     * @param name Parameter name.
+     * @param authority Authority.
+     * @param dflt Default value.
+     * @return Integer value.
+     * @throws IOException In case of parse exception.
+     */
+    public static int parameter(Configuration cfg, String name, String authority, int dflt) throws IOException {
+        String name0 = String.format(name, authority != null ? authority : "");
+
+        try {
+            return cfg.getInt(name0, dflt);
+        }
+        catch (NumberFormatException ignore) {
+            throw new IOException("Failed to parse parameter value to integer: " + name0);
+        }
+    }
+
+    /**
+     * Get boolean parameter.
+     *
+     * @param cfg Configuration.
+     * @param name Parameter name.
+     * @param authority Authority.
+     * @param dflt Default value.
+     * @return Boolean value.
+     */
+    public static boolean parameter(Configuration cfg, String name, String authority, boolean dflt) {
+        return cfg.getBoolean(String.format(name, authority != null ? authority : ""), dflt);
+    }
+
+    /**
+     * Cast Ignite exception to an appropriate IO exception.
+     *
+     * @param e Exception to cast.
+     * @return Cast exception.
+     */
+    public static IOException cast(IgniteCheckedException e) {
+        return cast(e, null);
+    }
+
+    /**
+     * Cast Ignite exception to an appropriate IO exception.
+     *
+     * @param e Exception to cast.
+     * @param path Path for exceptions.
+     * @return Cast exception.
+     */
+    @SuppressWarnings("unchecked")
+    public static IOException cast(IgniteCheckedException e, @Nullable String path) {
+        assert e != null;
+
+        // First check for any nested IOException; if one exists, rethrow it.
+        if (e.hasCause(IOException.class))
+            return e.getCause(IOException.class);
+        else if (e.hasCause(IgfsPathNotFoundException.class))
+            return new FileNotFoundException(path); // TODO: Or PathNotFoundException?
+        else if (e.hasCause(IgfsParentNotDirectoryException.class))
+            return new ParentNotDirectoryException(path);
+        else if (path != null && e.hasCause(IgfsDirectoryNotEmptyException.class))
+            return new PathIsNotEmptyDirectoryException(path);
+        else if (path != null && e.hasCause(IgfsPathAlreadyExistsException.class))
+            return new PathExistsException(path);
+        else {
+            String msg = e.getMessage();
+
+            return msg == null ? new IOException(e) : new IOException(msg, e);
+        }
+    }
+
+    /**
+     * Deletes all files and directories from the root of the given file system.
+     *
+     * @param fs The file system to clean up.
+     * @throws IOException On error.
+     */
+    public static void clear(FileSystem fs) throws IOException {
+        // Delete root contents:
+        FileStatus[] statuses = fs.listStatus(new Path("/"));
+
+        if (statuses != null) {
+            for (FileStatus stat: statuses)
+                fs.delete(stat.getPath(), true);
+        }
+    }
+
+    /**
+     * Deletes all files and directories from the root of the given file system.
+     *
+     * @param fs The file system to clean up.
+     * @throws IOException On error.
+     */
+    public static void clear(AbstractFileSystem fs) throws IOException {
+        // Delete root contents:
+        FileStatus[] statuses = fs.listStatus(new Path("/"));
+
+        if (statuses != null) {
+            for (FileStatus stat: statuses)
+                fs.delete(stat.getPath(), true);
+        }
+    }
+
+    /**
+     * Constructor.
+     */
+    private HadoopIgfsUtils() {
+        // No-op.
+    }
+}
\ No newline at end of file

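The three parameter() overloads above resolve templated configuration keys: the file system URI authority is substituted for the %s placeholder, so a single constant such as PARAM_IGFS_ENDPOINT_NO_EMBED serves every configured IGFS instance. A minimal usage sketch (the authority string "myIgfs@myHost" is hypothetical, chosen only for illustration):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.ignite.IgniteCheckedException;
    import org.apache.ignite.igfs.IgfsPathNotFoundException;
    import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils;

    public class IgfsUtilsExample {
        public static void main(String[] args) {
            Configuration cfg = new Configuration();

            // "fs.igfs.%s.endpoint.no_embed" -> "fs.igfs.myIgfs@myHost.endpoint.no_embed".
            boolean noEmbed = HadoopIgfsUtils.parameter(cfg,
                HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_EMBED, "myIgfs@myHost", false);

            // cast() maps a nested IGFS exception onto the closest Hadoop IOException.
            IgniteCheckedException e =
                new IgniteCheckedException(new IgfsPathNotFoundException("/missing"));

            IOException io = HadoopIgfsUtils.cast(e, "/missing");

            System.out.println(noEmbed + " / " + io.getClass().getSimpleName()); // false / FileNotFoundException
        }
    }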

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimapBase.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimapBase.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimapBase.java
new file mode 100644
index 0000000..39b7c51
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimapBase.java
@@ -0,0 +1,435 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.shuffle.collections;
+
+import java.io.DataInput;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopSerialization;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.shuffle.streams.HadoopDataInStream;
+import org.apache.ignite.internal.processors.hadoop.shuffle.streams.HadoopDataOutStream;
+import org.apache.ignite.internal.processors.hadoop.shuffle.streams.HadoopOffheapBuffer;
+import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
+import org.jetbrains.annotations.Nullable;
+
+import static org.apache.ignite.internal.processors.hadoop.HadoopJobProperty.SHUFFLE_OFFHEAP_PAGE_SIZE;
+import static org.apache.ignite.internal.processors.hadoop.HadoopJobProperty.get;
+
+/**
+ * Base class for all multimaps.
+ */
+public abstract class HadoopMultimapBase implements HadoopMultimap {
+    /** */
+    protected final GridUnsafeMemory mem;
+
+    /** */
+    protected final int pageSize;
+
+    /** */
+    private final Collection<Page> allPages = new ConcurrentLinkedQueue<>();
+
+    /**
+     * @param jobInfo Job info.
+     * @param mem Memory.
+     */
+    protected HadoopMultimapBase(HadoopJobInfo jobInfo, GridUnsafeMemory mem) {
+        assert jobInfo != null;
+        assert mem != null;
+
+        this.mem = mem;
+
+        pageSize = get(jobInfo, SHUFFLE_OFFHEAP_PAGE_SIZE, 32 * 1024);
+    }
+
+    /**
+     * @param page Page.
+     */
+    private void deallocate(Page page) {
+        assert page != null;
+
+        mem.release(page.ptr, page.size);
+    }
+
+    /**
+     * @param valPtr Value page pointer.
+     * @param nextValPtr Next value page pointer.
+     */
+    protected void nextValue(long valPtr, long nextValPtr) {
+        mem.writeLong(valPtr, nextValPtr);
+    }
+
+    /**
+     * @param valPtr Value page pointer.
+     * @return Next value page pointer.
+     */
+    protected long nextValue(long valPtr) {
+        return mem.readLong(valPtr);
+    }
+
+    /**
+     * @param valPtr Value page pointer.
+     * @param size Size.
+     */
+    protected void valueSize(long valPtr, int size) {
+        mem.writeInt(valPtr + 8, size);
+    }
+
+    /**
+     * @param valPtr Value page pointer.
+     * @return Value size.
+     */
+    protected int valueSize(long valPtr) {
+        return mem.readInt(valPtr + 8);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void close() {
+        for (Page page : allPages)
+            deallocate(page);
+    }
+
+    /**
+     * Reader for key and value.
+     */
+    protected class ReaderBase implements AutoCloseable {
+        /** */
+        private Object tmp;
+
+        /** */
+        private final HadoopSerialization ser;
+
+        /** */
+        private final HadoopDataInStream in = new HadoopDataInStream(mem);
+
+        /**
+         * @param ser Serialization.
+         */
+        protected ReaderBase(HadoopSerialization ser) {
+            assert ser != null;
+
+            this.ser = ser;
+        }
+
+        /**
+         * @param valPtr Value page pointer.
+         * @return Value.
+         */
+        public Object readValue(long valPtr) {
+            assert valPtr > 0 : valPtr;
+
+            try {
+                return read(valPtr + 12, valueSize(valPtr));
+            }
+            catch (IgniteCheckedException e) {
+                throw new IgniteException(e);
+            }
+        }
+
+        /**
+         * Resets temporary object to the given one.
+         *
+         * @param tmp Temporary object for reuse.
+         */
+        public void resetReusedObject(Object tmp) {
+            this.tmp = tmp;
+        }
+
+        /**
+         * @param ptr Pointer.
+         * @param size Object size.
+         * @return Object.
+         * @throws IgniteCheckedException If failed.
+         */
+        protected Object read(long ptr, long size) throws IgniteCheckedException {
+            in.buffer().set(ptr, size);
+
+            tmp = ser.read(in, tmp);
+
+            return tmp;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void close() throws IgniteCheckedException {
+            ser.close();
+        }
+    }
+
+    /**
+     * Base class for adders.
+     */
+    protected abstract class AdderBase implements Adder {
+        /** */
+        protected final HadoopSerialization keySer;
+
+        /** */
+        protected final HadoopSerialization valSer;
+
+        /** */
+        private final HadoopDataOutStream out;
+
+        /** */
+        private long writeStart;
+
+        /** Current page. */
+        private Page curPage;
+
+        /**
+         * @param ctx Task context.
+         * @throws IgniteCheckedException If failed.
+         */
+        protected AdderBase(HadoopTaskContext ctx) throws IgniteCheckedException {
+            valSer = ctx.valueSerialization();
+            keySer = ctx.keySerialization();
+
+            out = new HadoopDataOutStream(mem) {
+                @Override public long move(long size) {
+                    long ptr = super.move(size);
+
+                    if (ptr == 0) // Was not able to move - not enough free space.
+                        ptr = allocateNextPage(size);
+
+                    assert ptr != 0;
+
+                    return ptr;
+                }
+            };
+        }
+
+        /**
+         * @param requestedSize Requested size.
+         * @return Next write pointer.
+         */
+        private long allocateNextPage(long requestedSize) {
+            int writtenSize = writtenSize();
+
+            long newPageSize = nextPageSize(writtenSize + requestedSize);
+            long newPagePtr = mem.allocate(newPageSize);
+
+            HadoopOffheapBuffer b = out.buffer();
+
+            b.set(newPagePtr, newPageSize);
+
+            if (writtenSize != 0) {
+                mem.copyMemory(writeStart, newPagePtr, writtenSize);
+
+                b.move(writtenSize);
+            }
+
+            writeStart = newPagePtr;
+
+            // The old page cannot be released yet: earlier records still reference it,
+            // so remember it for deallocation on close().
+            Page oldPage = curPage;
+
+            curPage = new Page(newPagePtr, newPageSize);
+
+            if (oldPage != null)
+                allPages.add(oldPage);
+
+            return b.move(requestedSize);
+        }
+
+        /**
+         * Get next page size.
+         *
+         * @param required Required amount of data.
+         * @return Next page size.
+         */
+        private long nextPageSize(long required) {
+            long pages = (required / pageSize) + 1;
+
+            long pagesPow2 = nextPowerOfTwo(pages);
+
+            return pagesPow2 * pageSize;
+        }
+
+        /**
+         * Get next power of two which is greater than or equal to the given number. Naive implementation.
+         *
+         * @param val Number.
+         * @return Nearest pow2.
+         */
+        private long nextPowerOfTwo(long val) {
+            long res = 1;
+
+            while (res < val)
+                res = res << 1;
+
+            if (res < 0)
+                throw new IllegalArgumentException("Value is too big to find positive pow2: " + val);
+
+            return res;
+        }
+
+        /**
+         * @return Fixed pointer.
+         */
+        private long fixAlignment() {
+            HadoopOffheapBuffer b = out.buffer();
+
+            long ptr = b.pointer();
+
+            if ((ptr & 7L) != 0) { // Address is not 8-byte aligned.
+                ptr = (ptr + 8L) & ~7L;
+
+                b.pointer(ptr);
+            }
+
+            return ptr;
+        }
+
+        /**
+         * @param off Offset.
+         * @param o Object.
+         * @param ser Serialization.
+         * @return Page pointer.
+         * @throws IgniteCheckedException If failed.
+         */
+        protected long write(int off, Object o, HadoopSerialization ser) throws IgniteCheckedException {
+            writeStart = fixAlignment();
+
+            if (off != 0)
+                out.move(off);
+
+            ser.write(out, o);
+
+            return writeStart;
+        }
+
+        /**
+         * @param size Size.
+         * @return Pointer.
+         */
+        protected long allocate(int size) {
+            writeStart = fixAlignment();
+
+            out.move(size);
+
+            return writeStart;
+        }
+
+        /**
+         * Rewinds local allocation pointer to the given pointer if possible.
+         *
+         * @param ptr Pointer.
+         */
+        protected void localDeallocate(long ptr) {
+            HadoopOffheapBuffer b = out.buffer();
+
+            if (b.isInside(ptr))
+                b.pointer(ptr);
+            else
+                b.reset();
+        }
+
+        /**
+         * @return Written size.
+         */
+        protected int writtenSize() {
+            return (int)(out.buffer().pointer() - writeStart);
+        }
+
+        /** {@inheritDoc} */
+        @Override public Key addKey(DataInput in, @Nullable Key reuse) throws IgniteCheckedException {
+            throw new UnsupportedOperationException();
+        }
+
+        /** {@inheritDoc} */
+        @Override public void close() throws IgniteCheckedException {
+            if (curPage != null)
+                allPages.add(curPage);
+
+            keySer.close();
+            valSer.close();
+        }
+    }
+
+    /**
+     * Iterator over values.
+     */
+    protected class ValueIterator implements Iterator<Object> {
+        /** */
+        private long valPtr;
+
+        /** */
+        private final ReaderBase valReader;
+
+        /**
+         * @param valPtr Value page pointer.
+         * @param valReader Value reader.
+         */
+        protected ValueIterator(long valPtr, ReaderBase valReader) {
+            this.valPtr = valPtr;
+            this.valReader = valReader;
+        }
+
+        /**
+         * @param valPtr Head value pointer.
+         */
+        public void head(long valPtr) {
+            this.valPtr = valPtr;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean hasNext() {
+            return valPtr != 0;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Object next() {
+            if (!hasNext())
+                throw new NoSuchElementException();
+
+            Object res = valReader.readValue(valPtr);
+
+            valPtr = nextValue(valPtr);
+
+            return res;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void remove() {
+            throw new UnsupportedOperationException();
+        }
+    }
+
+    /**
+     * Page.
+     */
+    private static class Page {
+        /** Pointer. */
+        private final long ptr;
+
+        /** Size. */
+        private final long size;
+
+        /**
+         * Constructor.
+         *
+         * @param ptr Pointer.
+         * @param size Size.
+         */
+        public Page(long ptr, long size) {
+            this.ptr = ptr;
+            this.size = size;
+        }
+    }
+}
\ No newline at end of file

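Two details above are worth spelling out. First, every value record is laid out off-heap as an 8-byte pointer to the next value in the chain, a 4-byte payload size, and then the payload itself, which is why valueSize() reads at valPtr + 8 and readValue() at valPtr + 12. Second, when the output stream runs out of room, allocateNextPage() grows the page geometrically: the required byte count is rounded up to a whole number of pages plus one, then to a power of two. A standalone re-statement of that arithmetic (mirroring nextPageSize()/nextPowerOfTwo() above, with the default 32 KB page size):

    public class PageSizeExample {
        static long nextPowerOfTwo(long val) {
            long res = 1;

            while (res < val)
                res <<= 1;

            return res;
        }

        public static void main(String[] args) {
            long pageSize = 32 * 1024;            // Default SHUFFLE_OFFHEAP_PAGE_SIZE.
            long required = 100 * 1024;           // Bytes that must fit into the new page.

            long pages = required / pageSize + 1; // 3 full pages, rounded up -> 4.

            System.out.println(nextPowerOfTwo(pages) * pageSize); // 131072, i.e. a 128 KB page.
        }
    }
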
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopSkipList.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopSkipList.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopSkipList.java
new file mode 100644
index 0000000..7db88bc
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopSkipList.java
@@ -0,0 +1,733 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.shuffle.collections;
+
+import java.io.DataInput;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopSerialization;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
+import org.apache.ignite.internal.util.GridLongList;
+import org.apache.ignite.internal.util.GridRandom;
+import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
+import org.apache.ignite.internal.util.typedef.internal.A;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Skip list.
+ */
+public class HadoopSkipList extends HadoopMultimapBase {
+    /** */
+    private static final int HEADS_SIZE = 24 + 33 * 8; // Offset + max level is from 0 to 32 inclusive.
+
+    /** Top level. */
+    private final AtomicInteger topLevel = new AtomicInteger(-1);
+
+    /** Heads for all the lists. */
+    private final long heads;
+
+    /** */
+    private final AtomicBoolean visitGuard = new AtomicBoolean();
+
+    /**
+     * @param jobInfo Job info.
+     * @param mem Memory.
+     */
+    public HadoopSkipList(HadoopJobInfo jobInfo, GridUnsafeMemory mem) {
+        super(jobInfo, mem);
+
+        heads = mem.allocate(HEADS_SIZE, true);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void close() {
+        super.close();
+
+        mem.release(heads, HEADS_SIZE);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean visit(boolean ignoreLastVisited, Visitor v) throws IgniteCheckedException {
+        if (!visitGuard.compareAndSet(false, true))
+            return false;
+
+        for (long meta = nextMeta(heads, 0); meta != 0L; meta = nextMeta(meta, 0)) {
+            long valPtr = value(meta);
+
+            long lastVisited = ignoreLastVisited ? 0 : lastVisitedValue(meta);
+
+            if (valPtr != lastVisited) {
+                long k = key(meta);
+
+                v.onKey(k + 4, keySize(k));
+
+                lastVisitedValue(meta, valPtr); // Set it to the first value in chain.
+
+                do {
+                    v.onValue(valPtr + 12, valueSize(valPtr));
+
+                    valPtr = nextValue(valPtr);
+                }
+                while (valPtr != lastVisited);
+            }
+        }
+
+        visitGuard.lazySet(false);
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Adder startAdding(HadoopTaskContext ctx) throws IgniteCheckedException {
+        return new AdderImpl(ctx);
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopTaskInput input(HadoopTaskContext taskCtx) throws IgniteCheckedException {
+        Input in = new Input(taskCtx);
+
+        Comparator<Object> grpCmp = taskCtx.groupComparator();
+
+        if (grpCmp != null)
+            return new GroupedInput(grpCmp, in);
+
+        return in;
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @return Key pointer.
+     */
+    private long key(long meta) {
+        return mem.readLong(meta);
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @param key Key pointer.
+     */
+    private void key(long meta, long key) {
+        mem.writeLong(meta, key);
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @return Value pointer.
+     */
+    private long value(long meta) {
+        return mem.readLongVolatile(meta + 8);
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @param valPtr Value pointer.
+     */
+    private void value(long meta, long valPtr) {
+        mem.writeLongVolatile(meta + 8, valPtr);
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @param oldValPtr Old first value pointer.
+     * @param newValPtr New first value pointer.
+     * @return {@code true} If operation succeeded.
+     */
+    private boolean casValue(long meta, long oldValPtr, long newValPtr) {
+        return mem.casLong(meta + 8, oldValPtr, newValPtr);
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @return Last visited value pointer.
+     */
+    private long lastVisitedValue(long meta) {
+        return mem.readLong(meta + 16);
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @param valPtr Last visited value pointer.
+     */
+    private void lastVisitedValue(long meta, long valPtr) {
+        mem.writeLong(meta + 16, valPtr);
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @param level Level.
+     * @return Next meta pointer.
+     */
+    private long nextMeta(long meta, int level) {
+        assert meta > 0 : meta;
+
+        return mem.readLongVolatile(meta + 24 + 8 * level);
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @param level Level.
+     * @param oldNext Old next meta pointer.
+     * @param newNext New next meta pointer.
+     * @return {@code true} If operation succeeded.
+     */
+    private boolean casNextMeta(long meta, int level, long oldNext, long newNext) {
+        assert meta > 0 : meta;
+
+        return mem.casLong(meta + 24 + 8 * level, oldNext, newNext);
+    }
+
+    /**
+     * @param meta Meta pointer.
+     * @param level Level.
+     * @param nextMeta Next meta.
+     */
+    private void nextMeta(long meta, int level, long nextMeta) {
+        assert meta != 0;
+
+        mem.writeLong(meta + 24 + 8 * level, nextMeta);
+    }
+
+    /**
+     * @param keyPtr Key pointer.
+     * @return Key size.
+     */
+    private int keySize(long keyPtr) {
+        return mem.readInt(keyPtr);
+    }
+
+    /**
+     * @param keyPtr Key pointer.
+     * @param keySize Key size.
+     */
+    private void keySize(long keyPtr, int keySize) {
+        mem.writeInt(keyPtr, keySize);
+    }
+
+    /**
+     * @param rnd Random.
+     * @return Next level.
+     */
+    public static int randomLevel(Random rnd) {
+        int x = rnd.nextInt();
+
+        int level = 0;
+
+        while ((x & 1) != 0) { // Count sequential 1 bits.
+            level++;
+
+            x >>>= 1;
+        }
+
+        return level;
+    }
+
+    /**
+     * Reader.
+     */
+    private class Reader extends ReaderBase {
+        /**
+         * @param ser Serialization.
+         */
+        protected Reader(HadoopSerialization ser) {
+            super(ser);
+        }
+
+        /**
+         * @param meta Meta pointer.
+         * @return Key.
+         */
+        public Object readKey(long meta) {
+            assert meta > 0 : meta;
+
+            long k = key(meta);
+
+            try {
+                return read(k + 4, keySize(k));
+            }
+            catch (IgniteCheckedException e) {
+                throw new IgniteException(e);
+            }
+        }
+    }
+
+    /**
+     * Adder.
+     */
+    private class AdderImpl extends AdderBase {
+        /** */
+        private final Comparator<Object> cmp;
+
+        /** */
+        private final Random rnd = new GridRandom();
+
+        /** */
+        private final GridLongList stack = new GridLongList(16);
+
+        /** */
+        private final Reader keyReader;
+
+        /**
+         * @param ctx Task context.
+         * @throws IgniteCheckedException If failed.
+         */
+        protected AdderImpl(HadoopTaskContext ctx) throws IgniteCheckedException {
+            super(ctx);
+
+            keyReader = new Reader(keySer);
+
+            cmp = ctx.sortComparator();
+        }
+
+        /** {@inheritDoc} */
+        @Override public void write(Object key, Object val) throws IgniteCheckedException {
+            A.notNull(val, "val");
+
+            add(key, val);
+        }
+
+        /** {@inheritDoc} */
+        @Override public Key addKey(DataInput in, @Nullable Key reuse) throws IgniteCheckedException {
+            KeyImpl k = reuse == null ? new KeyImpl() : (KeyImpl)reuse;
+
+            k.tmpKey = keySer.read(in, k.tmpKey);
+
+            k.meta = add(k.tmpKey, null);
+
+            return k;
+        }
+
+        /**
+         * @param key Key.
+         * @param val Value.
+         * @param level Level.
+         * @return Meta pointer.
+         */
+        private long createMeta(long key, long val, int level) {
+            int size = 32 + 8 * level;
+
+            long meta = allocate(size);
+
+            key(meta, key);
+            value(meta, val);
+            lastVisitedValue(meta, 0L);
+
+            for (int i = 32; i < size; i += 8) // Fill with 0.
+                mem.writeLong(meta + i, 0L);
+
+            return meta;
+        }
+
+        /**
+         * @param key Key.
+         * @return Pointer.
+         * @throws IgniteCheckedException If failed.
+         */
+        private long writeKey(Object key) throws IgniteCheckedException {
+            long keyPtr = write(4, key, keySer);
+            int keySize = writtenSize() - 4;
+
+            keySize(keyPtr, keySize);
+
+            return keyPtr;
+        }
+
+        /**
+         * @param prevMeta Previous meta.
+         * @param meta Next meta.
+         */
+        private void stackPush(long prevMeta, long meta) {
+            stack.add(prevMeta);
+            stack.add(meta);
+        }
+
+        /**
+         * Drops last remembered frame from the stack.
+         */
+        private void stackPop() {
+            stack.pop(2);
+        }
+
+        /**
+         * @param key Key.
+         * @param val Value.
+         * @return Meta pointer.
+         * @throws IgniteCheckedException If failed.
+         */
+        private long add(Object key, @Nullable Object val) throws IgniteCheckedException {
+            assert key != null;
+
+            stack.clear();
+
+            long valPtr = 0;
+
+            if (val != null) { // Write value.
+                valPtr = write(12, val, valSer);
+                int valSize = writtenSize() - 12;
+
+                nextValue(valPtr, 0);
+                valueSize(valPtr, valSize);
+            }
+
+            long keyPtr = 0;
+            long newMeta = 0;
+            int newMetaLevel = -1;
+
+            long prevMeta = heads;
+            int level = topLevel.get();
+            long meta = level < 0 ? 0 : nextMeta(heads, level);
+
+            for (;;) {
+                if (level < 0) { // We did not find our key, trying to add new meta.
+                    if (keyPtr == 0) { // Write key and create meta only once.
+                        keyPtr = writeKey(key);
+
+                        newMetaLevel = randomLevel(rnd);
+                        newMeta = createMeta(keyPtr, valPtr, newMetaLevel);
+                    }
+
+                    nextMeta(newMeta, 0, meta); // Set next pointer of the new meta before publishing it.
+
+                    if (casNextMeta(prevMeta, 0, meta, newMeta)) { // New key was added successfully.
+                        laceUp(key, newMeta, newMetaLevel);
+
+                        return newMeta;
+                    }
+                    else { // Add failed, need to check out what was added by another thread.
+                        meta = nextMeta(prevMeta, level = 0);
+
+                        stackPop();
+                    }
+                }
+
+                int cmpRes = cmp(key, meta);
+
+                if (cmpRes == 0) { // Key found.
+                    if (newMeta != 0)  // Deallocate if we've allocated something.
+                        localDeallocate(keyPtr);
+
+                    if (valPtr == 0) // Only key needs to be added.
+                        return meta;
+
+                    for (;;) { // Add value for the key found.
+                        long nextVal = value(meta);
+
+                        nextValue(valPtr, nextVal);
+
+                        if (casValue(meta, nextVal, valPtr))
+                            return meta;
+                    }
+                }
+
+                assert cmpRes != 0;
+
+                if (cmpRes > 0) { // Go right.
+                    prevMeta = meta;
+                    meta = nextMeta(meta, level);
+
+                    if (meta != 0) // If nothing to the right then go down.
+                        continue;
+                }
+
+                while (--level >= 0) { // Go down.
+                    stackPush(prevMeta, meta); // Remember the path.
+
+                    long nextMeta = nextMeta(prevMeta, level);
+
+                    if (nextMeta != meta) { // If the next meta here differs from the upper level, examine it; otherwise keep going down.
+                        meta = nextMeta;
+
+                        assert meta != 0;
+
+                        break;
+                    }
+                }
+            }
+        }
+
+        /**
+         * @param key Key.
+         * @param meta Meta pointer.
+         * @return Comparison result.
+         */
+        @SuppressWarnings("unchecked")
+        private int cmp(Object key, long meta) {
+            assert meta != 0;
+
+            return cmp.compare(key, keyReader.readKey(meta));
+        }
+
+        /**
+         * Adds appropriate index links between metas.
+         *
+         * @param key Key.
+         * @param newMeta Just added meta.
+         * @param newMetaLevel New level.
+         */
+        private void laceUp(Object key, long newMeta, int newMetaLevel) {
+            for (int level = 1; level <= newMetaLevel; level++) { // Go from the bottom up.
+                long prevMeta = heads;
+                long meta = 0;
+
+                if (!stack.isEmpty()) { // Get the path back.
+                    meta = stack.remove();
+                    prevMeta = stack.remove();
+                }
+
+                for (;;) {
+                    nextMeta(newMeta, level, meta);
+
+                    if (casNextMeta(prevMeta, level, meta, newMeta))
+                        break;
+
+                    long oldMeta = meta;
+
+                    meta = nextMeta(prevMeta, level); // Reread meta.
+
+                    for (;;) {
+                        int cmpRes = cmp(key, meta);
+
+                        if (cmpRes > 0) { // Go right.
+                            prevMeta = meta;
+                            meta = nextMeta(prevMeta, level);
+
+                            if (meta != oldMeta) // Old meta already known to be greater than ours or is 0.
+                                continue;
+                        }
+
+                        assert cmpRes != 0; // Two different metas with equal keys must be impossible.
+
+                        break; // Retry cas.
+                    }
+                }
+            }
+
+            if (!stack.isEmpty())
+                return; // Our level already lower than top.
+
+            for (;;) { // Raise top level.
+                int top = topLevel.get();
+
+                if (newMetaLevel <= top || topLevel.compareAndSet(top, newMetaLevel))
+                    break;
+            }
+        }
+
+        /**
+         * Key.
+         */
+        private class KeyImpl implements Key {
+            /** */
+            private long meta;
+
+            /** */
+            private Object tmpKey;
+
+            /**
+             * @return Meta pointer for the key.
+             */
+            public long address() {
+                return meta;
+            }
+
+            /**
+             * @param val Value.
+             */
+            @Override public void add(Value val) {
+                int size = val.size();
+
+                long valPtr = allocate(size + 12);
+
+                val.copyTo(valPtr + 12);
+
+                valueSize(valPtr, size);
+
+                long nextVal;
+
+                do {
+                    nextVal = value(meta);
+
+                    nextValue(valPtr, nextVal);
+                }
+                while (!casValue(meta, nextVal, valPtr));
+            }
+        }
+    }
+
+    /**
+     * Task input.
+     */
+    private class Input implements HadoopTaskInput {
+        /** */
+        private long metaPtr = heads;
+
+        /** */
+        private final Reader keyReader;
+
+        /** */
+        private final Reader valReader;
+
+        /**
+         * @param taskCtx Task context.
+         * @throws IgniteCheckedException If failed.
+         */
+        private Input(HadoopTaskContext taskCtx) throws IgniteCheckedException {
+            keyReader = new Reader(taskCtx.keySerialization());
+            valReader = new Reader(taskCtx.valueSerialization());
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean next() {
+            metaPtr = nextMeta(metaPtr, 0);
+
+            return metaPtr != 0;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Object key() {
+            return keyReader.readKey(metaPtr);
+        }
+
+        /** {@inheritDoc} */
+        @Override public Iterator<?> values() {
+            return new ValueIterator(value(metaPtr), valReader);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void close() throws IgniteCheckedException {
+            keyReader.close();
+            valReader.close();
+        }
+    }
+
+    /**
+     * Grouped input using grouping comparator.
+     */
+    private class GroupedInput implements HadoopTaskInput {
+        /** */
+        private final Comparator<Object> grpCmp;
+
+        /** */
+        private final Input in;
+
+        /** */
+        private Object prevKey;
+
+        /** */
+        private Object nextKey;
+
+        /** */
+        private final GridLongList vals = new GridLongList();
+
+        /**
+         * @param grpCmp Grouping comparator.
+         * @param in Input.
+         */
+        private GroupedInput(Comparator<Object> grpCmp, Input in) {
+            this.grpCmp = grpCmp;
+            this.in = in;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean next() {
+            if (prevKey == null) { // First call.
+                if (!in.next())
+                    return false;
+
+                prevKey = in.key();
+
+                assert prevKey != null;
+
+                in.keyReader.resetReusedObject(null); // We need 2 instances of key object for comparison.
+
+                vals.add(value(in.metaPtr));
+            }
+            else {
+                if (in.metaPtr == 0) // We reached the end of the input.
+                    return false;
+
+                vals.clear();
+
+                vals.add(value(in.metaPtr));
+
+                in.keyReader.resetReusedObject(prevKey); // Switch key instances.
+
+                prevKey = nextKey;
+            }
+
+            while (in.next()) { // Collect head value pointers of all subsequent rows with equal keys.
+                if (grpCmp.compare(prevKey, nextKey = in.key()) == 0)
+                    vals.add(value(in.metaPtr));
+                else
+                    break;
+            }
+
+            assert !vals.isEmpty();
+
+            return true;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Object key() {
+            return prevKey;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Iterator<?> values() {
+            assert !vals.isEmpty();
+
+            final ValueIterator valIter = new ValueIterator(vals.get(0), in.valReader);
+
+            return new Iterator<Object>() {
+                /** */
+                private int idx;
+
+                @Override public boolean hasNext() {
+                    if (!valIter.hasNext()) {
+                        if (++idx == vals.size())
+                            return false;
+
+                        valIter.head(vals.get(idx));
+
+                        assert valIter.hasNext();
+                    }
+
+                    return true;
+                }
+
+                @Override public Object next() {
+                    return valIter.next();
+                }
+
+                @Override public void remove() {
+                    valIter.remove();
+                }
+            };
+        }
+
+        /** {@inheritDoc} */
+        @Override public void close() throws IgniteCheckedException {
+            in.close();
+        }
+    }
+}
\ No newline at end of file

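Two structural facts make the code above easier to follow. Each meta record stores the key pointer at offset 0, the head-of-chain value pointer at offset 8, the last-visited value pointer at offset 16, and the per-level next pointers from offset 24 on, which is exactly why HEADS_SIZE is 24 + 33 * 8 (levels 0 to 32 inclusive). And randomLevel() counts trailing 1 bits of a random int, so a node reaches level k with probability 2^-(k+1), the classic geometric distribution of a skip list. A small sketch that checks this empirically (assuming HadoopSkipList is on the classpath; the histogram size is arbitrary):

    import java.util.Random;
    import org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopSkipList;

    public class RandomLevelExample {
        public static void main(String[] args) {
            Random rnd = new Random(42);
            int[] hist = new int[8];

            for (int i = 0; i < 1_000_000; i++) {
                int level = HadoopSkipList.randomLevel(rnd);

                if (level < hist.length)
                    hist[level]++;
            }

            // Expect roughly 500000, 250000, 125000, ... halving at each level.
            for (int k = 0; k < hist.length; k++)
                System.out.println("level " + k + ": " + hist[k]);
        }
    }
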
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataInStream.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataInStream.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataInStream.java
new file mode 100644
index 0000000..3b5fa15
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataInStream.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.shuffle.streams;
+
+import java.io.DataInput;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
+import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
+
+/**
+ * Data input stream.
+ */
+public class HadoopDataInStream extends InputStream implements DataInput {
+    /** */
+    private final HadoopOffheapBuffer buf = new HadoopOffheapBuffer(0, 0);
+
+    /** */
+    private final GridUnsafeMemory mem;
+
+    /**
+     * @param mem Memory.
+     */
+    public HadoopDataInStream(GridUnsafeMemory mem) {
+        assert mem != null;
+
+        this.mem = mem;
+    }
+
+    /**
+     * @return Buffer.
+     */
+    public HadoopOffheapBuffer buffer() {
+        return buf;
+    }
+
+    /**
+     * @param size Size.
+     * @return Old pointer.
+     */
+    protected long move(long size) throws IOException {
+        long ptr = buf.move(size);
+
+        assert ptr != 0;
+
+        return ptr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int read() throws IOException {
+        return readUnsignedByte();
+    }
+
+    /** {@inheritDoc} */
+    @Override public int read(byte[] b, int off, int len) throws IOException {
+        readFully(b, off, len);
+
+        return len;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long skip(long n) throws IOException {
+        move(n);
+
+        return n;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readFully(byte[] b) throws IOException {
+        readFully(b, 0, b.length);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readFully(byte[] b, int off, int len) throws IOException {
+        mem.readBytes(move(len), b, off, len);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int skipBytes(int n) throws IOException {
+        move(n);
+
+        return n;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readBoolean() throws IOException {
+        byte res = readByte();
+
+        if (res == 1)
+            return true;
+
+        assert res == 0 : res;
+
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte readByte() throws IOException {
+        return mem.readByte(move(1));
+    }
+
+    /** {@inheritDoc} */
+    @Override public int readUnsignedByte() throws IOException {
+        return readByte() & 0xff;
+    }
+
+    /** {@inheritDoc} */
+    @Override public short readShort() throws IOException {
+        return mem.readShort(move(2));
+    }
+
+    /** {@inheritDoc} */
+    @Override public int readUnsignedShort() throws IOException {
+        return readShort() & 0xffff;
+    }
+
+    /** {@inheritDoc} */
+    @Override public char readChar() throws IOException {
+        return (char)readShort();
+    }
+
+    /** {@inheritDoc} */
+    @Override public int readInt() throws IOException {
+        return mem.readInt(move(4));
+    }
+
+    /** {@inheritDoc} */
+    @Override public long readLong() throws IOException {
+        return mem.readLong(move(8));
+    }
+
+    /** {@inheritDoc} */
+    @Override public float readFloat() throws IOException {
+        return mem.readFloat(move(4));
+    }
+
+    /** {@inheritDoc} */
+    @Override public double readDouble() throws IOException {
+        return mem.readDouble(move(8));
+    }
+
+    /** {@inheritDoc} */
+    @Override public String readLine() throws IOException {
+        throw new UnsupportedOperationException();
+    }
+
+    /** {@inheritDoc} */
+    @Override public String readUTF() throws IOException {
+        byte[] bytes = new byte[readInt()];
+
+        if (bytes.length != 0)
+            readFully(bytes);
+
+        return new String(bytes, StandardCharsets.UTF_8);
+    }
+}
\ No newline at end of file

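Note that readUTF() above is not wire-compatible with java.io.DataInputStream.readUTF(): it expects a 4-byte int length prefix followed by standard UTF-8 bytes, whereas the java.io contract uses a 2-byte length and modified UTF-8. Strings must therefore be written with the paired HadoopDataOutStream.writeUTF() (next file), which produces exactly this layout.
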
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataOutStream.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataOutStream.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataOutStream.java
new file mode 100644
index 0000000..f7b1a73
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataOutStream.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.shuffle.streams;
+
+import java.io.DataOutput;
+import java.io.OutputStream;
+import java.nio.charset.StandardCharsets;
+import org.apache.ignite.internal.util.GridUnsafe;
+import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
+
+/**
+ * Data output stream.
+ */
+public class HadoopDataOutStream extends OutputStream implements DataOutput {
+    /** */
+    private final HadoopOffheapBuffer buf = new HadoopOffheapBuffer(0, 0);
+
+    /** */
+    private final GridUnsafeMemory mem;
+
+    /**
+     * @param mem Memory.
+     */
+    public HadoopDataOutStream(GridUnsafeMemory mem) {
+        this.mem = mem;
+    }
+
+    /**
+     * @return Buffer.
+     */
+    public HadoopOffheapBuffer buffer() {
+        return buf;
+    }
+
+    /**
+     * @param size Size.
+     * @return Old pointer or {@code 0} if move was impossible.
+     */
+    public long move(long size) {
+        return buf.move(size);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void write(int b) {
+        writeByte(b);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void write(byte[] b) {
+        write(b, 0, b.length);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void write(byte[] b, int off, int len) {
+        GridUnsafe.copyMemory(b, GridUnsafe.BYTE_ARR_OFF + off, null, move(len), len);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeBoolean(boolean v) {
+        writeByte(v ? 1 : 0);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeByte(int v) {
+        mem.writeByte(move(1), (byte)v);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeShort(int v) {
+        mem.writeShort(move(2), (short)v);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeChar(int v) {
+        writeShort(v);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeInt(int v) {
+        mem.writeInt(move(4), v);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeLong(long v) {
+        mem.writeLong(move(8), v);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeFloat(float v) {
+        mem.writeFloat(move(4), v);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeDouble(double v) {
+        mem.writeDouble(move(8), v);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeBytes(String s) {
+        writeUTF(s);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeChars(String s) {
+        writeUTF(s);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeUTF(String s) {
+        byte[] b = s.getBytes(StandardCharsets.UTF_8);
+
+        writeInt(b.length);
+        write(b);
+    }
+}
\ No newline at end of file

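The two streams are meant to be used as a pair over the same GridUnsafeMemory region: the writer fills a buffer starting at some pointer, and a reader pointed at the same address replays it. A minimal round-trip sketch (buffer size and values are arbitrary; passing 0 to the GridUnsafeMemory constructor is assumed here to mean no preset size limit, matching how the module's tests construct it):

    import org.apache.ignite.internal.processors.hadoop.shuffle.streams.HadoopDataInStream;
    import org.apache.ignite.internal.processors.hadoop.shuffle.streams.HadoopDataOutStream;
    import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;

    public class StreamRoundTripExample {
        public static void main(String[] args) throws Exception {
            GridUnsafeMemory mem = new GridUnsafeMemory(0);

            long ptr = mem.allocate(64);

            HadoopDataOutStream out = new HadoopDataOutStream(mem);
            out.buffer().set(ptr, 64);

            out.writeLong(42L);    // 8 bytes at ptr.
            out.writeUTF("hello"); // 4-byte length + 5 UTF-8 bytes.

            HadoopDataInStream in = new HadoopDataInStream(mem);
            in.buffer().set(ptr, 64);

            System.out.println(in.readLong()); // 42
            System.out.println(in.readUTF());  // hello

            mem.release(ptr, 64);
        }
    }
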
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopOffheapBuffer.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopOffheapBuffer.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopOffheapBuffer.java
new file mode 100644
index 0000000..acc9be6
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopOffheapBuffer.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.shuffle.streams;
+
+/**
+ * Offheap buffer.
+ */
+public class HadoopOffheapBuffer {
+    /** Buffer begin address. */
+    private long bufPtr;
+
+    /** The first address we do not own. */
+    private long bufEnd;
+
+    /** Current read or write pointer. */
+    private long posPtr;
+
+    /**
+     * @param bufPtr Pointer to buffer begin.
+     * @param bufSize Size of the buffer.
+     */
+    public HadoopOffheapBuffer(long bufPtr, long bufSize) {
+        set(bufPtr, bufSize);
+    }
+
+    /**
+     * @param bufPtr Pointer to buffer begin.
+     * @param bufSize Size of the buffer.
+     */
+    public void set(long bufPtr, long bufSize) {
+        this.bufPtr = bufPtr;
+
+        posPtr = bufPtr;
+        bufEnd = bufPtr + bufSize;
+    }
+
+    /**
+     * @return Pointer to internal buffer begin.
+     */
+    public long begin() {
+        return bufPtr;
+    }
+
+    /**
+     * @return Buffer capacity.
+     */
+    public long capacity() {
+        return bufEnd - bufPtr;
+    }
+
+    /**
+     * @return Remaining capacity.
+     */
+    public long remaining() {
+        return bufEnd - posPtr;
+    }
+
+    /**
+     * @return Absolute pointer to the current position inside of the buffer.
+     */
+    public long pointer() {
+        return posPtr;
+    }
+
+    /**
+     * @param ptr Absolute pointer to the current position inside of the buffer.
+     */
+    public void pointer(long ptr) {
+        assert ptr >= bufPtr : bufPtr + " <= " + ptr;
+        assert ptr <= bufEnd : ptr + " <= " + bufEnd;
+
+        posPtr = ptr;
+    }
+
+    /**
+     * @param size Size to move on.
+     * @return Old position pointer or {@code 0} if move goes beyond the end of the buffer.
+     */
+    public long move(long size) {
+        assert size > 0 : size;
+
+        long oldPos = posPtr;
+        long newPos = oldPos + size;
+
+        if (newPos > bufEnd)
+            return 0;
+
+        posPtr = newPos;
+
+        return oldPos;
+    }
+
+    /**
+     * @param ptr Pointer.
+     * @return {@code true} If the given pointer is inside of this buffer.
+     */
+    public boolean isInside(long ptr) {
+        return ptr >= bufPtr && ptr <= bufEnd;
+    }
+
+    /**
+     * Resets position to the beginning of buffer.
+     */
+    public void reset() {
+        posPtr = bufPtr;
+    }
+}
\ No newline at end of file

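move() is the only operation that signals exhaustion: it returns the old position on success and 0 when the requested size would run past bufEnd, leaving the position untouched. HadoopDataOutStream.move() simply forwards that 0, which is what lets AdderBase.allocateNextPage() detect a full page and switch to a larger one. A tiny sketch (the addresses are arbitrary numbers; the buffer only does pointer arithmetic and never dereferences them):

    import org.apache.ignite.internal.processors.hadoop.shuffle.streams.HadoopOffheapBuffer;

    public class BufferMoveExample {
        public static void main(String[] args) {
            HadoopOffheapBuffer buf = new HadoopOffheapBuffer(1000, 16); // Owns [1000, 1016).

            System.out.println(buf.move(10));    // 1000: old position; position is now 1010.
            System.out.println(buf.move(10));    // 0: 1010 + 10 > 1016, position unchanged.
            System.out.println(buf.remaining()); // 6.

            buf.reset(); // Position back to 1000.
        }
    }
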
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopEmbeddedTaskExecutor.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopEmbeddedTaskExecutor.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopEmbeddedTaskExecutor.java
new file mode 100644
index 0000000..5ede18e
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopEmbeddedTaskExecutor.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor;
+
+import java.util.Collection;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopJob;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobPhase;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskOutput;
+import org.apache.ignite.internal.processors.hadoop.jobtracker.HadoopJobMetadata;
+import org.apache.ignite.internal.processors.hadoop.jobtracker.HadoopJobTracker;
+import org.apache.ignite.internal.util.GridConcurrentHashSet;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+
+/**
+ * Task executor.
+ */
+public class HadoopEmbeddedTaskExecutor extends HadoopTaskExecutorAdapter {
+    /** Job tracker. */
+    private HadoopJobTracker jobTracker;
+
+    /** */
+    private final ConcurrentMap<HadoopJobId, Collection<HadoopRunnableTask>> jobs = new ConcurrentHashMap<>();
+
+    /** Executor service to run tasks. */
+    private HadoopExecutorService exec;
+
+    /** {@inheritDoc} */
+    @Override public void onKernalStart() throws IgniteCheckedException {
+        super.onKernalStart();
+
+        jobTracker = ctx.jobTracker();
+
+        exec = new HadoopExecutorService(log, ctx.kernalContext().gridName(),
+            ctx.configuration().getMaxParallelTasks(), ctx.configuration().getMaxTaskQueueSize());
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onKernalStop(boolean cancel) {
+        if (exec != null) {
+            exec.shutdown(3000);
+
+            if (cancel) {
+                for (HadoopJobId jobId : jobs.keySet())
+                    cancelTasks(jobId);
+            }
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void stop(boolean cancel) {
+        if (exec != null && !exec.shutdown(30000))
+            U.warn(log, "Failed to finish running tasks in 30 sec.");
+    }
+
+    /** {@inheritDoc} */
+    @Override public void run(final HadoopJob job, Collection<HadoopTaskInfo> tasks) throws IgniteCheckedException {
+        if (log.isDebugEnabled())
+            log.debug("Submitting tasks for local execution [locNodeId=" + ctx.localNodeId() +
+                ", tasksCnt=" + tasks.size() + ']');
+
+        Collection<HadoopRunnableTask> executedTasks = jobs.get(job.id());
+
+        if (executedTasks == null) {
+            executedTasks = new GridConcurrentHashSet<>();
+
+            Collection<HadoopRunnableTask> extractedCol = jobs.put(job.id(), executedTasks);
+
+            assert extractedCol == null;
+        }
+
+        final Collection<HadoopRunnableTask> finalExecutedTasks = executedTasks;
+
+        for (final HadoopTaskInfo info : tasks) {
+            assert info != null;
+
+            HadoopRunnableTask task = new HadoopRunnableTask(log, job, ctx.shuffle().memory(), info,
+                ctx.localNodeId()) {
+                @Override protected void onTaskFinished(HadoopTaskStatus status) {
+                    if (log.isDebugEnabled())
+                        log.debug("Finished task execution [jobId=" + job.id() + ", taskInfo=" + info + ", " +
+                            "waitTime=" + waitTime() + ", execTime=" + executionTime() + ']');
+
+                    finalExecutedTasks.remove(this);
+
+                    jobTracker.onTaskFinished(info, status);
+                }
+
+                @Override protected HadoopTaskInput createInput(HadoopTaskContext taskCtx) throws IgniteCheckedException {
+                    return ctx.shuffle().input(taskCtx);
+                }
+
+                @Override protected HadoopTaskOutput createOutput(HadoopTaskContext taskCtx) throws IgniteCheckedException {
+                    return ctx.shuffle().output(taskCtx);
+                }
+            };
+
+            executedTasks.add(task);
+
+            exec.submit(task);
+        }
+    }
+
+    /**
+     * Cancels all currently running tasks for the given job ID and cancels scheduled execution of tasks
+     * for this job ID.
+     * <p>
+     * It is guaranteed that this method will not be called concurrently with the
+     * {@link #run(org.apache.ignite.internal.processors.hadoop.HadoopJob, Collection)} method, and that no more
+     * task submissions will be performed via that method for the given job ID after this method is called.
+     *
+     * @param jobId Job ID to cancel.
+     */
+    @Override public void cancelTasks(HadoopJobId jobId) {
+        Collection<HadoopRunnableTask> executedTasks = jobs.get(jobId);
+
+        if (executedTasks != null) {
+            for (HadoopRunnableTask task : executedTasks)
+                task.cancel();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onJobStateChanged(HadoopJobMetadata meta) throws IgniteCheckedException {
+        if (meta.phase() == HadoopJobPhase.PHASE_COMPLETE) {
+            Collection<HadoopRunnableTask> executedTasks = jobs.remove(meta.jobId());
+
+            assert executedTasks == null || executedTasks.isEmpty();
+        }
+    }
+}
\ No newline at end of file
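
A note on the bookkeeping in run() above: each task registers in a per-job
collection on submission and deregisters in onTaskFinished(), so cancelTasks()
always sees a live view of what is still running. The plain put() with an
assert is safe only because run() is guaranteed never to race with itself or
with cancelTasks() for the same job ID. Below is a minimal, self-contained
sketch of the same pattern in plain Java 8 (illustrative names, not Ignite
API), using a defensive computeIfAbsent() where the original relies on that
contract:

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.function.Consumer;

    public class PerJobTaskRegistry {
        /** Live tasks keyed by job ID. */
        private final ConcurrentMap<String, Set<Runnable>> jobs = new ConcurrentHashMap<>();

        /** Registers a task under its job; the first registration creates the set. */
        public void register(String jobId, Runnable task) {
            jobs.computeIfAbsent(jobId, id -> ConcurrentHashMap.newKeySet()).add(task);
        }

        /** Deregisters a finished task (mirrors onTaskFinished() above). */
        public void onFinished(String jobId, Runnable task) {
            Set<Runnable> live = jobs.get(jobId);

            if (live != null)
                live.remove(task);
        }

        /** Applies a cancel action to everything still live (mirrors cancelTasks() above). */
        public void cancel(String jobId, Consumer<Runnable> canceller) {
            Set<Runnable> live = jobs.get(jobId);

            if (live != null)
                live.forEach(canceller);
        }
    }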

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopExecutorService.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopExecutorService.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopExecutorService.java
new file mode 100644
index 0000000..993ecc9
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopExecutorService.java
@@ -0,0 +1,234 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor;
+
+import java.util.Collection;
+import java.util.concurrent.Callable;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+import org.apache.ignite.internal.util.worker.GridWorker;
+import org.apache.ignite.internal.util.worker.GridWorkerListener;
+import org.apache.ignite.internal.util.worker.GridWorkerListenerAdapter;
+import org.apache.ignite.thread.IgniteThread;
+import org.jsr166.ConcurrentHashMap8;
+
+import static java.util.Collections.newSetFromMap;
+
+/**
+ * Executor service without thread pooling.
+ */
+public class HadoopExecutorService {
+    /** */
+    private final LinkedBlockingQueue<Callable<?>> queue;
+
+    /** */
+    private final Collection<GridWorker> workers = newSetFromMap(new ConcurrentHashMap8<GridWorker, Boolean>());
+
+    /** */
+    private final AtomicInteger active = new AtomicInteger();
+
+    /** */
+    private final int maxTasks;
+
+    /** */
+    private final String gridName;
+
+    /** */
+    private final IgniteLogger log;
+
+    /** */
+    private volatile boolean shutdown;
+
+    /** */
+    private final GridWorkerListener lsnr = new GridWorkerListenerAdapter() {
+        @Override public void onStopped(GridWorker w) {
+            workers.remove(w);
+
+            if (shutdown) {
+                active.decrementAndGet();
+
+                return;
+            }
+
+            Callable<?> task = queue.poll();
+
+            if (task != null)
+                startThread(task);
+            else {
+                active.decrementAndGet();
+
+                if (!queue.isEmpty())
+                    startFromQueue();
+            }
+        }
+    };
+
+    /**
+     * @param log Logger.
+     * @param gridName Grid name.
+     * @param maxTasks Max number of tasks.
+     * @param maxQueue Max queue length.
+     */
+    public HadoopExecutorService(IgniteLogger log, String gridName, int maxTasks, int maxQueue) {
+        assert maxTasks > 0 : maxTasks;
+        assert maxQueue > 0 : maxQueue;
+
+        this.maxTasks = maxTasks;
+        this.queue = new LinkedBlockingQueue<>(maxQueue);
+        this.gridName = gridName;
+        this.log = log.getLogger(HadoopExecutorService.class);
+    }
+
+    /**
+     * @return Number of active workers.
+     */
+    public int active() {
+        return workers.size();
+    }
+
+    /**
+     * Submit task.
+     *
+     * @param task Task.
+     */
+    public void submit(Callable<?> task) {
+        while (queue.isEmpty()) {
+            int active0 = active.get();
+
+            if (active0 == maxTasks)
+                break;
+
+            if (active.compareAndSet(active0, active0 + 1)) {
+                startThread(task);
+
+                return; // Started in new thread bypassing queue.
+            }
+        }
+
+        try {
+            while (!queue.offer(task, 100, TimeUnit.MILLISECONDS)) {
+                if (shutdown)
+                    return; // Rejected due to shutdown.
+            }
+        }
+        catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+
+            return;
+        }
+
+        startFromQueue();
+    }
+
+    /**
+     * Attempts to start task from queue.
+     */
+    private void startFromQueue() {
+        do {
+            int active0 = active.get();
+
+            if (active0 == maxTasks)
+                break;
+
+            if (active.compareAndSet(active0, active0 + 1)) {
+                Callable<?> task = queue.poll();
+
+                if (task == null) {
+                    int res = active.decrementAndGet();
+
+                    assert res >= 0 : res;
+
+                    break;
+                }
+
+                startThread(task);
+            }
+        }
+        while (!queue.isEmpty());
+    }
+
+    /**
+     * @param task Task.
+     */
+    private void startThread(final Callable<?> task) {
+        String workerName;
+
+        if (task instanceof HadoopRunnableTask) {
+            final HadoopTaskInfo i = ((HadoopRunnableTask)task).taskInfo();
+
+            workerName = "Hadoop-task-" + i.jobId() + "-" + i.type() + "-" + i.taskNumber() + "-" + i.attempt();
+        }
+        else
+            workerName = task.toString();
+
+        GridWorker w = new GridWorker(gridName, workerName, log, lsnr) {
+            @Override protected void body() {
+                try {
+                    task.call();
+                }
+                catch (Exception e) {
+                    log.error("Failed to execute task: " + task, e);
+                }
+            }
+        };
+
+        workers.add(w);
+
+        if (shutdown)
+            w.cancel();
+
+        new IgniteThread(w).start();
+    }
+
+    /**
+     * Shuts down this executor service.
+     *
+     * @param awaitTimeMillis Time in milliseconds to wait for task completion.
+     * @return {@code True} if all tasks completed.
+     */
+    public boolean shutdown(long awaitTimeMillis) {
+        shutdown = true;
+
+        for (GridWorker w : workers)
+            w.cancel();
+
+        while (awaitTimeMillis > 0 && !workers.isEmpty()) {
+            try {
+                Thread.sleep(100);
+
+                awaitTimeMillis -= 100;
+            }
+            catch (InterruptedException e) {
+                break;
+            }
+        }
+
+        return workers.isEmpty();
+    }
+
+    /**
+     * @return {@code True} if the {@linkplain #shutdown(long)} method has already been called.
+     */
+    public boolean isShutdown() {
+        return shutdown;
+    }
+}
\ No newline at end of file
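
HadoopExecutorService above is deliberately not a thread pool: every accepted
task gets a fresh IgniteThread, and the only throttle is the CAS loop over the
'active' counter shared by submit() and startFromQueue(). Distilled into a
self-contained sketch (names here are illustrative, not Ignite API):

    import java.util.concurrent.atomic.AtomicInteger;

    final class SlotCounter {
        /** Currently claimed worker slots. */
        private final AtomicInteger active = new AtomicInteger();

        /** Maximum number of concurrent workers. */
        private final int maxTasks;

        SlotCounter(int maxTasks) {
            this.maxTasks = maxTasks;
        }

        /** @return {@code True} if a slot was claimed and a thread may be started. */
        boolean tryClaim() {
            for (;;) {
                int cur = active.get();

                if (cur == maxTasks)
                    return false; // All slots busy: caller parks the task in the queue.

                if (active.compareAndSet(cur, cur + 1))
                    return true; // Slot claimed: caller starts a fresh thread.
            }
        }

        /** Releases a slot when a worker stops without taking over a queued task. */
        void release() {
            active.decrementAndGet();
        }
    }

Note how onStopped() in the worker listener polls the queue before releasing:
a stopping worker hands its slot directly to the next queued task, and the
counter is only decremented when there is nothing left to run.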

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopRunnableTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopRunnableTask.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopRunnableTask.java
new file mode 100644
index 0000000..a57efe6
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopRunnableTask.java
@@ -0,0 +1,293 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor;
+
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.internal.processors.hadoop.HadoopJob;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInput;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskOutput;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopPerformanceCounter;
+import org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopHashMultimap;
+import org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopMultimap;
+import org.apache.ignite.internal.processors.hadoop.shuffle.collections.HadoopSkipList;
+import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+import static org.apache.ignite.internal.processors.hadoop.HadoopJobProperty.COMBINER_HASHMAP_SIZE;
+import static org.apache.ignite.internal.processors.hadoop.HadoopJobProperty.SHUFFLE_COMBINER_NO_SORTING;
+import static org.apache.ignite.internal.processors.hadoop.HadoopJobProperty.get;
+import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.COMBINE;
+import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.MAP;
+
+/**
+ * Runnable task.
+ */
+public abstract class HadoopRunnableTask implements Callable<Void> {
+    /** */
+    private final GridUnsafeMemory mem;
+
+    /** */
+    private final IgniteLogger log;
+
+    /** */
+    private final HadoopJob job;
+
+    /** Task to run. */
+    private final HadoopTaskInfo info;
+
+    /** Submit time. */
+    private final long submitTs = U.currentTimeMillis();
+
+    /** Execution start timestamp. */
+    private long execStartTs;
+
+    /** Execution end timestamp. */
+    private long execEndTs;
+
+    /** */
+    private HadoopMultimap combinerInput;
+
+    /** */
+    private volatile HadoopTaskContext ctx;
+
+    /** Set if the task is being cancelled. */
+    private volatile boolean cancelled;
+
+    /** Node id. */
+    private UUID nodeId;
+
+    /**
+     * @param log Log.
+     * @param job Job.
+     * @param mem Memory.
+     * @param info Task info.
+     * @param nodeId Node id.
+     */
+    protected HadoopRunnableTask(IgniteLogger log, HadoopJob job, GridUnsafeMemory mem, HadoopTaskInfo info,
+        UUID nodeId) {
+        this.nodeId = nodeId;
+        this.log = log.getLogger(HadoopRunnableTask.class);
+        this.job = job;
+        this.mem = mem;
+        this.info = info;
+    }
+
+    /**
+     * @return Wait time.
+     */
+    public long waitTime() {
+        return execStartTs - submitTs;
+    }
+
+    /**
+     * @return Execution time.
+     */
+    public long executionTime() {
+        return execEndTs - execStartTs;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Void call() throws IgniteCheckedException {
+        ctx = job.getTaskContext(info);
+
+        return ctx.runAsJobOwner(new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                call0();
+
+                return null;
+            }
+        });
+    }
+
+    /**
+     * Implements the actual task running.
+     *
+     * @throws IgniteCheckedException If failed.
+     */
+    void call0() throws IgniteCheckedException {
+        execStartTs = U.currentTimeMillis();
+
+        Throwable err = null;
+
+        HadoopTaskState state = HadoopTaskState.COMPLETED;
+
+        HadoopPerformanceCounter perfCntr = null;
+
+        try {
+            perfCntr = HadoopPerformanceCounter.getCounter(ctx.counters(), nodeId);
+
+            perfCntr.onTaskSubmit(info, submitTs);
+            perfCntr.onTaskPrepare(info, execStartTs);
+
+            ctx.prepareTaskEnvironment();
+
+            runTask(perfCntr);
+
+            if (info.type() == MAP && job.info().hasCombiner()) {
+                ctx.taskInfo(new HadoopTaskInfo(COMBINE, info.jobId(), info.taskNumber(), info.attempt(), null));
+
+                try {
+                    runTask(perfCntr);
+                }
+                finally {
+                    ctx.taskInfo(info);
+                }
+            }
+        }
+        catch (HadoopTaskCancelledException ignored) {
+            state = HadoopTaskState.CANCELED;
+        }
+        catch (Throwable e) {
+            state = HadoopTaskState.FAILED;
+            err = e;
+
+            U.error(log, "Task execution failed.", e);
+
+            if (e instanceof Error)
+                throw e;
+        }
+        finally {
+            execEndTs = U.currentTimeMillis();
+
+            if (perfCntr != null)
+                perfCntr.onTaskFinish(info, execEndTs);
+
+            onTaskFinished(new HadoopTaskStatus(state, err, ctx == null ? null : ctx.counters()));
+
+            if (combinerInput != null)
+                combinerInput.close();
+
+            if (ctx != null)
+                ctx.cleanupTaskEnvironment();
+        }
+    }
+
+    /**
+     * @param perfCntr Performance counter.
+     * @throws IgniteCheckedException If failed.
+     */
+    private void runTask(HadoopPerformanceCounter perfCntr) throws IgniteCheckedException {
+        if (cancelled)
+            throw new HadoopTaskCancelledException("Task cancelled.");
+
+        try (HadoopTaskOutput out = createOutputInternal(ctx);
+             HadoopTaskInput in = createInputInternal(ctx)) {
+
+            ctx.input(in);
+            ctx.output(out);
+
+            perfCntr.onTaskStart(ctx.taskInfo(), U.currentTimeMillis());
+
+            ctx.run();
+        }
+    }
+
+    /**
+     * Cancels the task execution.
+     */
+    public void cancel() {
+        cancelled = true;
+
+        if (ctx != null)
+            ctx.cancel();
+    }
+
+    /**
+     * @param status Task status.
+     */
+    protected abstract void onTaskFinished(HadoopTaskStatus status);
+
+    /**
+     * @param ctx Task context.
+     * @return Task input.
+     * @throws IgniteCheckedException If failed.
+     */
+    @SuppressWarnings("unchecked")
+    private HadoopTaskInput createInputInternal(HadoopTaskContext ctx) throws IgniteCheckedException {
+        switch (ctx.taskInfo().type()) {
+            case SETUP:
+            case MAP:
+            case COMMIT:
+            case ABORT:
+                return null;
+
+            case COMBINE:
+                assert combinerInput != null;
+
+                return combinerInput.input(ctx);
+
+            default:
+                return createInput(ctx);
+        }
+    }
+
+    /**
+     * @param ctx Task context.
+     * @return Input.
+     * @throws IgniteCheckedException If failed.
+     */
+    protected abstract HadoopTaskInput createInput(HadoopTaskContext ctx) throws IgniteCheckedException;
+
+    /**
+     * @param ctx Task context.
+     * @return Output.
+     * @throws IgniteCheckedException If failed.
+     */
+    protected abstract HadoopTaskOutput createOutput(HadoopTaskContext ctx) throws IgniteCheckedException;
+
+    /**
+     * @param ctx Task context.
+     * @return Task output.
+     * @throws IgniteCheckedException If failed.
+     */
+    private HadoopTaskOutput createOutputInternal(HadoopTaskContext ctx) throws IgniteCheckedException {
+        switch (ctx.taskInfo().type()) {
+            case SETUP:
+            case REDUCE:
+            case COMMIT:
+            case ABORT:
+                return null;
+
+            case MAP:
+                if (job.info().hasCombiner()) {
+                    assert combinerInput == null;
+
+                    combinerInput = get(job.info(), SHUFFLE_COMBINER_NO_SORTING, false) ?
+                        new HadoopHashMultimap(job.info(), mem, get(job.info(), COMBINER_HASHMAP_SIZE, 8 * 1024)) :
+                        new HadoopSkipList(job.info(), mem); // TODO replace with red-black tree
+
+                    return combinerInput.startAdding(ctx);
+                }
+
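+                // Fall through when no combiner is configured.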
+            default:
+                return createOutput(ctx);
+        }
+    }
+
+    /**
+     * @return Task info.
+     */
+    public HadoopTaskInfo taskInfo() {
+        return info;
+    }
+}
\ No newline at end of file
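
The three abstract methods are the whole integration surface of
HadoopRunnableTask: where input and output come from, and who is told about
completion. A skeletal subclass, mirroring the anonymous one built in
HadoopEmbeddedTaskExecutor.run() but with the shuffle wiring replaced by
illustrative no-ops (log, job, mem, info, nodeId and exec are assumed to be
in scope):

    HadoopRunnableTask task = new HadoopRunnableTask(log, job, mem, info, nodeId) {
        /** Called exactly once from the finally block of call0(), on success or failure. */
        @Override protected void onTaskFinished(HadoopTaskStatus status) {
            // Report to whatever tracks the job, e.g. jobTracker.onTaskFinished(info, status).
        }

        /** Consulted for REDUCE tasks only; COMBINE reads the in-memory combiner input instead. */
        @Override protected HadoopTaskInput createInput(HadoopTaskContext taskCtx) {
            return null; // Real code pulls this from the shuffle: ctx.shuffle().input(taskCtx).
        }

        /** Consulted for COMBINE and for MAP without a combiner; other task types need no output. */
        @Override protected HadoopTaskOutput createOutput(HadoopTaskContext taskCtx) {
            return null; // Real code: ctx.shuffle().output(taskCtx).
        }
    };

    exec.submit(task); // HadoopExecutorService from the file above.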

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskExecutorAdapter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskExecutorAdapter.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskExecutorAdapter.java
new file mode 100644
index 0000000..f13c76a
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskExecutorAdapter.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor;
+
+import java.util.Collection;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopComponent;
+import org.apache.ignite.internal.processors.hadoop.HadoopJob;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+import org.apache.ignite.internal.processors.hadoop.jobtracker.HadoopJobMetadata;
+
+/**
+ * Common superclass for task executors.
+ */
+public abstract class HadoopTaskExecutorAdapter extends HadoopComponent {
+    /**
+     * Runs tasks.
+     *
+     * @param job Job.
+     * @param tasks Tasks.
+     * @throws IgniteCheckedException If failed.
+     */
+    public abstract void run(final HadoopJob job, Collection<HadoopTaskInfo> tasks) throws IgniteCheckedException;
+
+    /**
+     * Cancels all currently running tasks for the given job ID and cancels scheduled execution of tasks
+     * for this job ID.
+     * <p>
+     * It is guaranteed that this method will not be called concurrently with the
+     * {@link #run(org.apache.ignite.internal.processors.hadoop.HadoopJob, Collection)} method, and that no more
+     * task submissions will be performed via that method for the given job ID after this method is called.
+     *
+     * @param jobId Job ID to cancel.
+     * @throws IgniteCheckedException If failed.
+     */
+    public abstract void cancelTasks(HadoopJobId jobId) throws IgniteCheckedException;
+
+    /**
+     * Callback invoked on job state change.
+     *
+     * @param meta Job metadata.
+     * @throws IgniteCheckedException If failed.
+     */
+    public abstract void onJobStateChanged(HadoopJobMetadata meta) throws IgniteCheckedException;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskState.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskState.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskState.java
new file mode 100644
index 0000000..b22d291
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskState.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor;
+
+/**
+ * State of the task.
+ */
+public enum HadoopTaskState {
+    /** Running task. */
+    RUNNING,
+
+    /** Completed task. */
+    COMPLETED,
+
+    /** Failed task. */
+    FAILED,
+
+    /** Canceled task. */
+    CANCELED,
+
+    /** Process crashed. */
+    CRASHED
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskStatus.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskStatus.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskStatus.java
new file mode 100644
index 0000000..fa09ff7
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskStatus.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.taskexecutor;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Task status.
+ */
+public class HadoopTaskStatus implements Externalizable {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private HadoopTaskState state;
+
+    /** */
+    private Throwable failCause;
+
+    /** */
+    private HadoopCounters cntrs;
+
+    /**
+     * Default constructor required by {@link Externalizable}.
+     */
+    public HadoopTaskStatus() {
+        // No-op.
+    }
+
+    /**
+     * Creates new instance.
+     *
+     * @param state Task state.
+     * @param failCause Failure cause (if any).
+     */
+    public HadoopTaskStatus(HadoopTaskState state, @Nullable Throwable failCause) {
+        this(state, failCause, null);
+    }
+
+    /**
+     * Creates new instance.
+     *
+     * @param state Task state.
+     * @param failCause Failure cause (if any).
+     * @param cntrs Task counters.
+     */
+    public HadoopTaskStatus(HadoopTaskState state, @Nullable Throwable failCause,
+        @Nullable HadoopCounters cntrs) {
+        assert state != null;
+
+        this.state = state;
+        this.failCause = failCause;
+        this.cntrs = cntrs;
+    }
+
+    /**
+     * @return State.
+     */
+    public HadoopTaskState state() {
+        return state;
+    }
+
+    /**
+     * @return Fail cause.
+     */
+    @Nullable public Throwable failCause() {
+        return failCause;
+    }
+
+    /**
+     * @return Counters.
+     */
+    @Nullable public HadoopCounters counters() {
+        return cntrs;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopTaskStatus.class, this);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeExternal(ObjectOutput out) throws IOException {
+        out.writeObject(state);
+        out.writeObject(failCause);
+        out.writeObject(cntrs);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        state = (HadoopTaskState)in.readObject();
+        failCause = (Throwable)in.readObject();
+        cntrs = (HadoopCounters)in.readObject();
+    }
+}
\ No newline at end of file
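
Because HadoopTaskStatus implements Externalizable with symmetric write/read
methods, it round-trips through ordinary Java object streams, which is
presumably why it is marked this way for transfer between nodes. A quick
self-contained check (values are illustrative):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;

    ByteArrayOutputStream bytes = new ByteArrayOutputStream();

    // Serialization calls writeExternal() for Externalizable classes.
    try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
        out.writeObject(new HadoopTaskStatus(HadoopTaskState.COMPLETED, null));
    }

    // Deserialization runs the no-arg constructor first, then readExternal().
    try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
        HadoopTaskStatus status = (HadoopTaskStatus)in.readObject();

        assert status.state() == HadoopTaskState.COMPLETED;
        assert status.failCause() == null && status.counters() == null;
    }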


[31/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultMapReducePlannerSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultMapReducePlannerSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultMapReducePlannerSelfTest.java
new file mode 100644
index 0000000..a69b72a
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultMapReducePlannerSelfTest.java
@@ -0,0 +1,615 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteFileSystem;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.hadoop.mapreduce.IgniteHadoopMapReducePlanner;
+import org.apache.ignite.igfs.IgfsBlockLocation;
+import org.apache.ignite.igfs.IgfsPath;
+import org.apache.ignite.internal.processors.hadoop.planner.HadoopAbstractMapReducePlanner;
+import org.apache.ignite.internal.processors.igfs.IgfsBlockLocationImpl;
+import org.apache.ignite.internal.processors.igfs.IgfsIgniteMock;
+import org.apache.ignite.internal.processors.igfs.IgfsMock;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.testframework.GridTestNode;
+import org.apache.ignite.testframework.GridTestUtils;
+
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.UUID;
+
+/**
+ * Tests for the default map-reduce planner.
+ */
+public class HadoopDefaultMapReducePlannerSelfTest extends HadoopAbstractSelfTest {
+    /** */
+    private static final UUID ID_1 = new UUID(0, 1);
+
+    /** */
+    private static final UUID ID_2 = new UUID(0, 2);
+
+    /** */
+    private static final UUID ID_3 = new UUID(0, 3);
+
+    /** */
+    private static final String HOST_1 = "host1";
+
+    /** */
+    private static final String HOST_2 = "host2";
+
+    /** */
+    private static final String HOST_3 = "host3";
+
+    /** */
+    private static final String INVALID_HOST_1 = "invalid_host1";
+
+    /** */
+    private static final String INVALID_HOST_2 = "invalid_host2";
+
+    /** */
+    private static final String INVALID_HOST_3 = "invalid_host3";
+
+    /** Mocked IGFS. */
+    private static final IgniteFileSystem IGFS = new MockIgfs();
+
+    /** Mocked Grid. */
+    private static final IgfsIgniteMock GRID = new IgfsIgniteMock(null, IGFS);
+
+    /** Planner. */
+    private static final HadoopMapReducePlanner PLANNER = new IgniteHadoopMapReducePlanner();
+
+    /** Block locations. */
+    private static final Map<Block, Collection<IgfsBlockLocation>> BLOCK_MAP = new HashMap<>();
+
+    /** Proxy map. */
+    private static final Map<URI, Boolean> PROXY_MAP = new HashMap<>();
+
+    /** Last created plan. */
+    private static final ThreadLocal<HadoopMapReducePlan> PLAN = new ThreadLocal<>();
+
+    /**
+     * Static initializer.
+     */
+    static {
+        GridTestUtils.setFieldValue(PLANNER, HadoopAbstractMapReducePlanner.class, "ignite", GRID);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        GridTestUtils.setFieldValue(PLANNER, HadoopAbstractMapReducePlanner.class, "log", log());
+
+        BLOCK_MAP.clear();
+        PROXY_MAP.clear();
+    }
+
+    /**
+     * @throws IgniteCheckedException If failed.
+     */
+    public void testIgfsOneBlockPerNode() throws IgniteCheckedException {
+        HadoopFileBlock split1 = split(true, "/file1", 0, 100, HOST_1);
+        HadoopFileBlock split2 = split(true, "/file2", 0, 100, HOST_2);
+        HadoopFileBlock split3 = split(true, "/file3", 0, 100, HOST_3);
+
+        mapIgfsBlock(split1.file(), 0, 100, location(0, 100, ID_1));
+        mapIgfsBlock(split2.file(), 0, 100, location(0, 100, ID_2));
+        mapIgfsBlock(split3.file(), 0, 100, location(0, 100, ID_3));
+
+        plan(1, split1);
+        assert ensureMappers(ID_1, split1);
+        assert ensureReducers(ID_1, 1);
+        assert ensureEmpty(ID_2);
+        assert ensureEmpty(ID_3);
+
+        plan(2, split1);
+        assert ensureMappers(ID_1, split1);
+        assert ensureReducers(ID_1, 2);
+        assert ensureEmpty(ID_2);
+        assert ensureEmpty(ID_3);
+
+        plan(1, split1, split2);
+        assert ensureMappers(ID_1, split1);
+        assert ensureMappers(ID_2, split2);
+        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 0) || ensureReducers(ID_1, 0) && ensureReducers(ID_2, 1);
+        assert ensureEmpty(ID_3);
+
+        plan(2, split1, split2);
+        assert ensureMappers(ID_1, split1);
+        assert ensureMappers(ID_2, split2);
+        assert ensureReducers(ID_1, 1);
+        assert ensureReducers(ID_2, 1);
+        assert ensureEmpty(ID_3);
+
+        plan(3, split1, split2);
+        assert ensureMappers(ID_1, split1);
+        assert ensureMappers(ID_2, split2);
+        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 2) || ensureReducers(ID_1, 2) && ensureReducers(ID_2, 1);
+        assert ensureEmpty(ID_3);
+
+        plan(3, split1, split2, split3);
+        assert ensureMappers(ID_1, split1);
+        assert ensureMappers(ID_2, split2);
+        assert ensureMappers(ID_3, split3);
+        assert ensureReducers(ID_1, 1);
+        assert ensureReducers(ID_2, 1);
+        assert ensureReducers(ID_3, 1);
+
+        plan(5, split1, split2, split3);
+        assert ensureMappers(ID_1, split1);
+        assert ensureMappers(ID_2, split2);
+        assert ensureMappers(ID_3, split3);
+        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 2) && ensureReducers(ID_3, 2) ||
+            ensureReducers(ID_1, 2) && ensureReducers(ID_2, 1) && ensureReducers(ID_3, 2) ||
+            ensureReducers(ID_1, 2) && ensureReducers(ID_2, 2) && ensureReducers(ID_3, 1);
+    }
+
+    /**
+     * @throws IgniteCheckedException If failed.
+     */
+    public void testNonIgfsOneBlockPerNode() throws IgniteCheckedException {
+        HadoopFileBlock split1 = split(false, "/file1", 0, 100, HOST_1);
+        HadoopFileBlock split2 = split(false, "/file2", 0, 100, HOST_2);
+        HadoopFileBlock split3 = split(false, "/file3", 0, 100, HOST_3);
+
+        plan(1, split1);
+        assert ensureMappers(ID_1, split1);
+        assert ensureReducers(ID_1, 1);
+        assert ensureEmpty(ID_2);
+        assert ensureEmpty(ID_3);
+
+        plan(2, split1);
+        assert ensureMappers(ID_1, split1);
+        assert ensureReducers(ID_1, 2);
+        assert ensureEmpty(ID_2);
+        assert ensureEmpty(ID_3);
+
+        plan(1, split1, split2);
+        assert ensureMappers(ID_1, split1);
+        assert ensureMappers(ID_2, split2);
+        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 0) || ensureReducers(ID_1, 0) && ensureReducers(ID_2, 1);
+        assert ensureEmpty(ID_3);
+
+        plan(2, split1, split2);
+        assert ensureMappers(ID_1, split1);
+        assert ensureMappers(ID_2, split2);
+        assert ensureReducers(ID_1, 1);
+        assert ensureReducers(ID_2, 1);
+        assert ensureEmpty(ID_3);
+
+        plan(3, split1, split2);
+        assert ensureMappers(ID_1, split1);
+        assert ensureMappers(ID_2, split2);
+        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 2) || ensureReducers(ID_1, 2) && ensureReducers(ID_2, 1);
+        assert ensureEmpty(ID_3);
+
+        plan(3, split1, split2, split3);
+        assert ensureMappers(ID_1, split1);
+        assert ensureMappers(ID_2, split2);
+        assert ensureMappers(ID_3, split3);
+        assert ensureReducers(ID_1, 1);
+        assert ensureReducers(ID_2, 1);
+        assert ensureReducers(ID_3, 1);
+
+        plan(5, split1, split2, split3);
+        assert ensureMappers(ID_1, split1);
+        assert ensureMappers(ID_2, split2);
+        assert ensureMappers(ID_3, split3);
+        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 2) && ensureReducers(ID_3, 2) ||
+            ensureReducers(ID_1, 2) && ensureReducers(ID_2, 1) && ensureReducers(ID_3, 2) ||
+            ensureReducers(ID_1, 2) && ensureReducers(ID_2, 2) && ensureReducers(ID_3, 1);
+    }
+
+    /**
+     * @throws IgniteCheckedException If failed.
+     */
+    public void testIgfsSeveralBlocksPerNode() throws IgniteCheckedException {
+        HadoopFileBlock split1 = split(true, "/file1", 0, 100, HOST_1, HOST_2);
+        HadoopFileBlock split2 = split(true, "/file2", 0, 100, HOST_1, HOST_2);
+        HadoopFileBlock split3 = split(true, "/file3", 0, 100, HOST_1, HOST_3);
+
+        mapIgfsBlock(split1.file(), 0, 100, location(0, 100, ID_1, ID_2));
+        mapIgfsBlock(split2.file(), 0, 100, location(0, 100, ID_1, ID_2));
+        mapIgfsBlock(split3.file(), 0, 100, location(0, 100, ID_1, ID_3));
+
+        plan(1, split1);
+        assert ensureMappers(ID_1, split1) && ensureReducers(ID_1, 1) && ensureEmpty(ID_2) ||
+            ensureEmpty(ID_1) && ensureMappers(ID_2, split1) && ensureReducers(ID_2, 1);
+        assert ensureEmpty(ID_3);
+
+        plan(2, split1);
+        assert ensureMappers(ID_1, split1) && ensureReducers(ID_1, 2) && ensureEmpty(ID_2) ||
+            ensureEmpty(ID_1) && ensureMappers(ID_2, split1) && ensureReducers(ID_2, 2);
+        assert ensureEmpty(ID_3);
+
+        plan(1, split1, split2);
+        assert ensureMappers(ID_1, split1) && ensureMappers(ID_2, split2) ||
+            ensureMappers(ID_1, split2) && ensureMappers(ID_2, split1);
+        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 0) || ensureReducers(ID_1, 0) && ensureReducers(ID_2, 1);
+        assert ensureEmpty(ID_3);
+
+        plan(2, split1, split2);
+        assert ensureMappers(ID_1, split1) && ensureMappers(ID_2, split2) ||
+            ensureMappers(ID_1, split2) && ensureMappers(ID_2, split1);
+        assert ensureReducers(ID_1, 1);
+        assert ensureReducers(ID_2, 1);
+        assert ensureEmpty(ID_3);
+
+        plan(3, split1, split2, split3);
+        assert ensureReducers(ID_1, 1);
+        assert ensureReducers(ID_2, 1);
+        assert ensureReducers(ID_3, 1);
+
+        plan(5, split1, split2, split3);
+        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 2) && ensureReducers(ID_3, 2) ||
+            ensureReducers(ID_1, 2) && ensureReducers(ID_2, 1) && ensureReducers(ID_3, 2) ||
+            ensureReducers(ID_1, 2) && ensureReducers(ID_2, 2) && ensureReducers(ID_3, 1);
+    }
+
+    /**
+     * @throws IgniteCheckedException If failed.
+     */
+    public void testNonIgfsSeveralBlocksPerNode() throws IgniteCheckedException {
+        HadoopFileBlock split1 = split(false, "/file1", 0, 100, HOST_1, HOST_2);
+        HadoopFileBlock split2 = split(false, "/file2", 0, 100, HOST_1, HOST_2);
+        HadoopFileBlock split3 = split(false, "/file3", 0, 100, HOST_1, HOST_3);
+
+        plan(1, split1);
+        assert ensureMappers(ID_1, split1) && ensureReducers(ID_1, 1) && ensureEmpty(ID_2) ||
+            ensureEmpty(ID_1) && ensureMappers(ID_2, split1) && ensureReducers(ID_2, 1);
+        assert ensureEmpty(ID_3);
+
+        plan(2, split1);
+        assert ensureMappers(ID_1, split1) && ensureReducers(ID_1, 2) && ensureEmpty(ID_2) ||
+            ensureEmpty(ID_1) && ensureMappers(ID_2, split1) && ensureReducers(ID_2, 2);
+        assert ensureEmpty(ID_3);
+
+        plan(1, split1, split2);
+        assert ensureMappers(ID_1, split1) && ensureMappers(ID_2, split2) ||
+            ensureMappers(ID_1, split2) && ensureMappers(ID_2, split1);
+        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 0) || ensureReducers(ID_1, 0) && ensureReducers(ID_2, 1);
+        assert ensureEmpty(ID_3);
+
+        plan(2, split1, split2);
+        assert ensureMappers(ID_1, split1) && ensureMappers(ID_2, split2) ||
+            ensureMappers(ID_1, split2) && ensureMappers(ID_2, split1);
+        assert ensureReducers(ID_1, 1);
+        assert ensureReducers(ID_2, 1);
+        assert ensureEmpty(ID_3);
+
+        plan(3, split1, split2, split3);
+        assert ensureReducers(ID_1, 1);
+        assert ensureReducers(ID_2, 1);
+        assert ensureReducers(ID_3, 1);
+
+        plan(5, split1, split2, split3);
+        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 2) && ensureReducers(ID_3, 2) ||
+            ensureReducers(ID_1, 2) && ensureReducers(ID_2, 1) && ensureReducers(ID_3, 2) ||
+            ensureReducers(ID_1, 2) && ensureReducers(ID_2, 2) && ensureReducers(ID_3, 1);
+    }
+
+    /**
+     * @throws IgniteCheckedException If failed.
+     */
+    public void testIgfsSeveralComplexBlocksPerNode() throws IgniteCheckedException {
+        HadoopFileBlock split1 = split(true, "/file1", 0, 100, HOST_1, HOST_2, HOST_3);
+        HadoopFileBlock split2 = split(true, "/file2", 0, 100, HOST_1, HOST_2, HOST_3);
+
+        mapIgfsBlock(split1.file(), 0, 100, location(0, 50, ID_1, ID_2), location(51, 100, ID_1, ID_3));
+        mapIgfsBlock(split2.file(), 0, 100, location(0, 50, ID_1, ID_2), location(51, 100, ID_2, ID_3));
+
+        plan(1, split1);
+        assert ensureMappers(ID_1, split1);
+        assert ensureReducers(ID_1, 1);
+        assert ensureEmpty(ID_2);
+        assert ensureEmpty(ID_3);
+
+        plan(1, split2);
+        assert ensureMappers(ID_2, split2);
+        assert ensureReducers(ID_2, 1);
+        assert ensureEmpty(ID_1);
+        assert ensureEmpty(ID_3);
+
+        plan(1, split1, split2);
+        assert ensureMappers(ID_1, split1);
+        assert ensureMappers(ID_2, split2);
+        assert ensureReducers(ID_1, 0) && ensureReducers(ID_2, 1) || ensureReducers(ID_1, 1) && ensureReducers(ID_2, 0);
+        assert ensureEmpty(ID_3);
+
+        plan(2, split1, split2);
+        assert ensureMappers(ID_1, split1);
+        assert ensureMappers(ID_2, split2);
+        assert ensureReducers(ID_1, 1);
+        assert ensureReducers(ID_2, 1);
+        assert ensureEmpty(ID_3);
+    }
+
+    /**
+     * @throws IgniteCheckedException If failed.
+     */
+    public void testNonIgfsOrphans() throws IgniteCheckedException {
+        HadoopFileBlock split1 = split(false, "/file1", 0, 100, INVALID_HOST_1, INVALID_HOST_2);
+        HadoopFileBlock split2 = split(false, "/file2", 0, 100, INVALID_HOST_1, INVALID_HOST_3);
+        HadoopFileBlock split3 = split(false, "/file3", 0, 100, INVALID_HOST_2, INVALID_HOST_3);
+
+        plan(1, split1);
+        assert ensureMappers(ID_1, split1) && ensureReducers(ID_1, 1) && ensureEmpty(ID_2) && ensureEmpty(ID_3) ||
+            ensureEmpty(ID_1) && ensureMappers(ID_2, split1) && ensureReducers(ID_2, 1) && ensureEmpty(ID_3) ||
+            ensureEmpty(ID_1) && ensureEmpty(ID_2) && ensureMappers(ID_3, split1) && ensureReducers(ID_3, 1);
+
+        plan(2, split1);
+        assert ensureMappers(ID_1, split1) && ensureReducers(ID_1, 2) && ensureEmpty(ID_2) && ensureEmpty(ID_3) ||
+            ensureEmpty(ID_1) && ensureMappers(ID_2, split1) && ensureReducers(ID_2, 2) && ensureEmpty(ID_3) ||
+            ensureEmpty(ID_1) && ensureEmpty(ID_2) && ensureMappers(ID_3, split1) && ensureReducers(ID_3, 2);
+
+        plan(1, split1, split2, split3);
+        assert ensureMappers(ID_1, split1) && ensureMappers(ID_2, split2) && ensureMappers(ID_3, split3) ||
+            ensureMappers(ID_1, split1) && ensureMappers(ID_2, split3) && ensureMappers(ID_3, split2) ||
+            ensureMappers(ID_1, split2) && ensureMappers(ID_2, split1) && ensureMappers(ID_3, split3) ||
+            ensureMappers(ID_1, split2) && ensureMappers(ID_2, split3) && ensureMappers(ID_3, split1) ||
+            ensureMappers(ID_1, split3) && ensureMappers(ID_2, split1) && ensureMappers(ID_3, split2) ||
+            ensureMappers(ID_1, split3) && ensureMappers(ID_2, split2) && ensureMappers(ID_3, split1);
+        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 0) && ensureReducers(ID_3, 0) ||
+            ensureReducers(ID_1, 0) && ensureReducers(ID_2, 1) && ensureReducers(ID_3, 0) ||
+            ensureReducers(ID_1, 0) && ensureReducers(ID_2, 0) && ensureReducers(ID_3, 1);
+
+        plan(3, split1, split2, split3);
+        assert ensureMappers(ID_1, split1) && ensureMappers(ID_2, split2) && ensureMappers(ID_3, split3) ||
+            ensureMappers(ID_1, split1) && ensureMappers(ID_2, split3) && ensureMappers(ID_3, split2) ||
+            ensureMappers(ID_1, split2) && ensureMappers(ID_2, split1) && ensureMappers(ID_3, split3) ||
+            ensureMappers(ID_1, split2) && ensureMappers(ID_2, split3) && ensureMappers(ID_3, split1) ||
+            ensureMappers(ID_1, split3) && ensureMappers(ID_2, split1) && ensureMappers(ID_3, split2) ||
+            ensureMappers(ID_1, split3) && ensureMappers(ID_2, split2) && ensureMappers(ID_3, split1);
+        assert ensureReducers(ID_1, 1);
+        assert ensureReducers(ID_2, 1);
+        assert ensureReducers(ID_3, 1);
+
+        plan(5, split1, split2, split3);
+        assert ensureMappers(ID_1, split1) && ensureMappers(ID_2, split2) && ensureMappers(ID_3, split3) ||
+            ensureMappers(ID_1, split1) && ensureMappers(ID_2, split3) && ensureMappers(ID_3, split2) ||
+            ensureMappers(ID_1, split2) && ensureMappers(ID_2, split1) && ensureMappers(ID_3, split3) ||
+            ensureMappers(ID_1, split2) && ensureMappers(ID_2, split3) && ensureMappers(ID_3, split1) ||
+            ensureMappers(ID_1, split3) && ensureMappers(ID_2, split1) && ensureMappers(ID_3, split2) ||
+            ensureMappers(ID_1, split3) && ensureMappers(ID_2, split2) && ensureMappers(ID_3, split1);
+        assert ensureReducers(ID_1, 1) && ensureReducers(ID_2, 2) && ensureReducers(ID_3, 2) ||
+            ensureReducers(ID_1, 2) && ensureReducers(ID_2, 1) && ensureReducers(ID_3, 2) ||
+            ensureReducers(ID_1, 2) && ensureReducers(ID_2, 2) && ensureReducers(ID_3, 1);
+    }
+
+    /**
+     * Create plan.
+     *
+     * @param reducers Reducers count.
+     * @param splits Splits.
+     * @return Plan.
+     * @throws IgniteCheckedException If failed.
+     */
+    private static HadoopMapReducePlan plan(int reducers, HadoopInputSplit... splits) throws IgniteCheckedException {
+        assert reducers > 0;
+        assert splits != null && splits.length > 0;
+
+        Collection<HadoopInputSplit> splitList = new ArrayList<>(splits.length);
+
+        Collections.addAll(splitList, splits);
+
+        Collection<ClusterNode> top = new ArrayList<>();
+
+        GridTestNode node1 = new GridTestNode(ID_1);
+        GridTestNode node2 = new GridTestNode(ID_2);
+        GridTestNode node3 = new GridTestNode(ID_3);
+
+        node1.setHostName(HOST_1);
+        node2.setHostName(HOST_2);
+        node3.setHostName(HOST_3);
+
+        top.add(node1);
+        top.add(node2);
+        top.add(node3);
+
+        HadoopMapReducePlan plan = PLANNER.preparePlan(new HadoopPlannerMockJob(splitList, reducers), top, null);
+
+        PLAN.set(plan);
+
+        return plan;
+    }
+
+    /**
+     * Ensure that the node contains the given mappers.
+     *
+     * @param nodeId Node ID.
+     * @param expSplits Expected splits.
+     * @return {@code True} if this assumption is valid.
+     */
+    private static boolean ensureMappers(UUID nodeId, HadoopInputSplit... expSplits) {
+        Collection<HadoopInputSplit> expSplitsCol = new ArrayList<>();
+
+        Collections.addAll(expSplitsCol, expSplits);
+
+        Collection<HadoopInputSplit> splits = PLAN.get().mappers(nodeId);
+
+        return F.eq(expSplitsCol, splits);
+    }
+
+    /**
+     * Ensure that the node contains the given number of reducers.
+     *
+     * @param nodeId Node ID.
+     * @param reducers Reducers.
+     * @return {@code True} if this assumption is valid.
+     */
+    private static boolean ensureReducers(UUID nodeId, int reducers) {
+        int[] reducersArr = PLAN.get().reducers(nodeId);
+
+        return reducers == 0 ? F.isEmpty(reducersArr) : (reducersArr != null && reducersArr.length == reducers);
+    }
+
+    /**
+     * Ensure that no mappers or reducers are located on this node.
+     *
+     * @param nodeId Node ID.
+     * @return {@code True} if this assumption is valid.
+     */
+    private static boolean ensureEmpty(UUID nodeId) {
+        return F.isEmpty(PLAN.get().mappers(nodeId)) && F.isEmpty(PLAN.get().reducers(nodeId));
+    }
+
+    /**
+     * Create split.
+     *
+     * @param igfs IGFS flag.
+     * @param file File.
+     * @param start Start.
+     * @param len Length.
+     * @param hosts Hosts.
+     * @return Split.
+     */
+    private static HadoopFileBlock split(boolean igfs, String file, long start, long len, String... hosts) {
+        URI uri = URI.create((igfs ? "igfs://igfs@" : "hdfs://") + file);
+
+        return new HadoopFileBlock(hosts, uri, start, len);
+    }
+
+    /**
+     * Create block location.
+     *
+     * @param start Start.
+     * @param len Length.
+     * @param nodeIds Node IDs.
+     * @return Block location.
+     */
+    private static IgfsBlockLocation location(long start, long len, UUID... nodeIds) {
+        assert nodeIds != null && nodeIds.length > 0;
+
+        Collection<ClusterNode> nodes = new ArrayList<>(nodeIds.length);
+
+        for (UUID id : nodeIds)
+            nodes.add(new GridTestNode(id));
+
+        return new IgfsBlockLocationImpl(start, len, nodes);
+    }
+
+    /**
+     * Map IGFS block to nodes.
+     *
+     * @param file File.
+     * @param start Start.
+     * @param len Length.
+     * @param locations Locations.
+     */
+    private static void mapIgfsBlock(URI file, long start, long len, IgfsBlockLocation... locations) {
+        assert locations != null && locations.length > 0;
+
+        IgfsPath path = new IgfsPath(file);
+
+        Block block = new Block(path, start, len);
+
+        Collection<IgfsBlockLocation> locationsList = new ArrayList<>();
+
+        Collections.addAll(locationsList, locations);
+
+        BLOCK_MAP.put(block, locationsList);
+    }
+
+    /**
+     * Block.
+     */
+    private static class Block {
+        /** */
+        private final IgfsPath path;
+
+        /** */
+        private final long start;
+
+        /** */
+        private final long len;
+
+        /**
+         * Constructor.
+         *
+         * @param path Path.
+         * @param start Start.
+         * @param len Length.
+         */
+        private Block(IgfsPath path, long start, long len) {
+            this.path = path;
+            this.start = start;
+            this.len = len;
+        }
+
+        /** {@inheritDoc} */
+        @SuppressWarnings("RedundantIfStatement")
+        @Override public boolean equals(Object o) {
+            if (this == o) return true;
+            if (!(o instanceof Block)) return false;
+
+            Block block = (Block) o;
+
+            if (len != block.len)
+                return false;
+
+            if (start != block.start)
+                return false;
+
+            if (!path.equals(block.path))
+                return false;
+
+            return true;
+        }
+
+        /** {@inheritDoc} */
+        @Override public int hashCode() {
+            int res = path.hashCode();
+
+            res = 31 * res + (int) (start ^ (start >>> 32));
+            res = 31 * res + (int) (len ^ (len >>> 32));
+
+            return res;
+        }
+    }
+
+    /**
+     * Mocked IGFS.
+     */
+    private static class MockIgfs extends IgfsMock {
+        /**
+         * Constructor.
+         */
+        public MockIgfs() {
+            super("igfs");
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean isProxy(URI path) {
+            return PROXY_MAP.containsKey(path) && PROXY_MAP.get(path);
+        }
+
+        /** {@inheritDoc} */
+        @Override public Collection<IgfsBlockLocation> affinity(IgfsPath path, long start, long len) {
+            return BLOCK_MAP.get(new Block(path, start, len));
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean exists(IgfsPath path) {
+            return true;
+        }
+    }
+}
\ No newline at end of file
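
The HadoopErrorSimulator added next keys fault injection off a per-stage bit
mask: create(Kind, bits) picks the failure type, and each onXxx() hook fires
only when its bit is set (bit 1 = map configure, bit 2 = map setup, and so
on). A hedged usage sketch based only on the API shown below; the job wiring
is assumed, not part of this file:

    // Install a simulator that throws a RuntimeException during mapper setup
    // only (bit 2 = onMapSetup). The swap is a CAS, so the expected current
    // instance must match.
    HadoopErrorSimulator sim = HadoopErrorSimulator.create(HadoopErrorSimulator.Kind.Runtime, 2);

    boolean installed = HadoopErrorSimulator.setInstance(HadoopErrorSimulator.instance(), sim);

    assert installed;

    // ... run a map-reduce job whose tasks invoke the simulator hooks ...

    // Restore the no-op instance afterwards.
    HadoopErrorSimulator.setInstance(sim, HadoopErrorSimulator.noopInstance);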

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopErrorSimulator.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopErrorSimulator.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopErrorSimulator.java
new file mode 100644
index 0000000..843b42b
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopErrorSimulator.java
@@ -0,0 +1,326 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * Error simulator.
+ */
+public class HadoopErrorSimulator {
+    /** No-op singleton instance. */
+    public static final HadoopErrorSimulator noopInstance = new HadoopErrorSimulator();
+
+    /** Instance ref. */
+    private static final AtomicReference<HadoopErrorSimulator> ref = new AtomicReference<>(noopInstance);
+
+    /**
+     * Creates simulator of given kind with given stage bits.
+     *
+     * @param kind The kind.
+     * @param bits The stage bits.
+     * @return The simulator.
+     */
+    public static HadoopErrorSimulator create(Kind kind, int bits) {
+        switch (kind) {
+            case Noop:
+                return noopInstance;
+            case Runtime:
+                return new RuntimeExceptionBitHadoopErrorSimulator(bits);
+            case IOException:
+                return new IOExceptionBitHadoopErrorSimulator(bits);
+            case Error:
+                return new ErrorBitHadoopErrorSimulator(bits);
+            default:
+                throw new IllegalStateException("Unknown kind: " + kind);
+        }
+    }
+
+    /**
+     * Gets the error simulator instance.
+     */
+    public static HadoopErrorSimulator instance() {
+        return ref.get();
+    }
+
+    /**
+     * Atomically replaces the current instance.
+     *
+     * @param expect Expected current instance.
+     * @param update New instance to set.
+     * @return {@code True} if the instance was replaced.
+     */
+    public static boolean setInstance(HadoopErrorSimulator expect, HadoopErrorSimulator update) {
+        return ref.compareAndSet(expect, update);
+    }
+
+    /**
+     * Constructor.
+     */
+    private HadoopErrorSimulator() {
+        // no-op
+    }
+
+    /**
+     * Invoked on the named stage.
+     */
+    public void onMapConfigure() {
+        // no-op
+    }
+
+    /**
+     * Invoked on the named stage.
+     */
+    public void onMapSetup() throws IOException, InterruptedException {
+        // no-op
+    }
+
+    /**
+     * Invoked on the named stage.
+     */
+    public void onMap() throws IOException {
+        // no-op
+    }
+
+    /**
+     * Invoked on the named stage.
+     */
+    public void onMapCleanup() throws IOException, InterruptedException {
+        // no-op
+    }
+
+    /**
+     * Invoked on the named stage.
+     */
+    public void onMapClose() throws IOException {
+        // no-op
+    }
+
+    /**
+     * Invoked on the named stage. Declares no checked exceptions because setConf() does not.
+     */
+    public void onCombineConfigure() {
+        // no-op
+    }
+
+    /**
+     * Invoked on the named stage.
+     */
+    public void onCombineSetup() throws IOException, InterruptedException {
+        // no-op
+    }
+
+    /**
+     * Invoked on the named stage.
+     */
+    public void onCombine() throws IOException {
+        // no-op
+    }
+
+    /**
+     * Invoked on the named stage.
+     */
+    public void onCombineCleanup() throws IOException, InterruptedException {
+        // no-op
+    }
+
+    /**
+     * Invoked on the named stage.
+     */
+    public void onReduceConfigure() {
+        // no-op
+    }
+
+    /**
+     * Invoked on the named stage.
+     */
+    public void onReduceSetup() throws IOException, InterruptedException {
+        // no-op
+    }
+
+    /**
+     * Invoked on the named stage.
+     */
+    public void onReduce() throws IOException {
+        // no-op
+    }
+
+    /**
+     * Invoked on the named stage.
+     */
+    public void onReduceCleanup() throws IOException, InterruptedException {
+        // no-op
+    }
+
+    /**
+     * Error kind.
+     */
+    public enum Kind {
+        /** No error. */
+        Noop,
+
+        /** Runtime. */
+        Runtime,
+
+        /** IOException. */
+        IOException,
+
+        /** java.lang.Error. */
+        Error
+    }
+
+    /**
+     * Runtime error simulator.
+     */
+    public static class RuntimeExceptionBitHadoopErrorSimulator extends HadoopErrorSimulator {
+        /** Stage bits: define which map-reduce stages will cause errors. */
+        private final int bits;
+
+        /**
+         * Constructor.
+         *
+         * @param bits Stage bits.
+         */
+        protected RuntimeExceptionBitHadoopErrorSimulator(int bits) {
+            this.bits = bits;
+        }
+
+        /**
+         * Simulates an error.
+         */
+        protected void simulateError() throws IOException {
+            throw new RuntimeException("An error simulated by " + getClass().getSimpleName());
+        }
+
+        /** {@inheritDoc} */
+        @Override public final void onMapConfigure() {
+            try {
+                if ((bits & 1) != 0)
+                    simulateError();
+            }
+            catch (IOException e) {
+                // ignore
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public final void onMapSetup() throws IOException, InterruptedException {
+            if ((bits & 2) != 0)
+                simulateError();
+        }
+
+        /** {@inheritDoc} */
+        @Override public final void onMap() throws IOException {
+            if ((bits & 4) != 0)
+                simulateError();
+        }
+
+        /** {@inheritDoc} */
+        @Override public final void onMapCleanup() throws IOException, InterruptedException {
+            if ((bits & 8) != 0)
+                simulateError();
+        }
+
+        /** {@inheritDoc} */
+        @Override public final void onCombineConfigure() {
+            try {
+                if ((bits & 16) != 0)
+                    simulateError();
+            }
+            catch (IOException e) {
+                // ignore
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public final void onCombineSetup() throws IOException, InterruptedException {
+            if ((bits & 32) != 0)
+                simulateError();
+        }
+
+        /** {@inheritDoc} */
+        @Override public final void onCombine() throws IOException {
+            if ((bits & 64) != 0)
+                simulateError();
+        }
+
+        /** {@inheritDoc} */
+        @Override public final void onCombineCleanup() throws IOException, InterruptedException {
+            if ((bits & 128) != 0)
+                simulateError();
+        }
+
+        /** {@inheritDoc} */
+        @Override public final void onReduceConfigure() {
+            try {
+                if ((bits & 256) != 0)
+                    simulateError();
+            }
+            catch (IOException e) {
+                // ignore
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public final void onReduceSetup() throws IOException, InterruptedException {
+            if ((bits & 512) != 0)
+                simulateError();
+        }
+
+        /** {@inheritDoc} */
+        @Override public final void onReduce() throws IOException {
+            if ((bits & 1024) != 0)
+                simulateError();
+        }
+
+        /** {@inheritDoc} */
+        @Override public final void onReduceCleanup() throws IOException, InterruptedException {
+            if ((bits & 2048) != 0)
+                simulateError();
+        }
+    }
+
+    /**
+     * java.lang.Error simulator.
+     */
+    public static class ErrorBitHadoopErrorSimulator extends RuntimeExceptionBitHadoopErrorSimulator {
+        /**
+         * Constructor.
+         */
+        public ErrorBitHadoopErrorSimulator(int bits) {
+            super(bits);
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void simulateError() {
+            throw new Error("An error simulated by " + getClass().getSimpleName());
+        }
+    }
+
+    /**
+     * IOException simulator.
+     */
+    public static class IOExceptionBitHadoopErrorSimulator extends RuntimeExceptionBitHadoopErrorSimulator {
+        /**
+         * Constructor.
+         */
+        public IOExceptionBitHadoopErrorSimulator(int bits) {
+            super(bits);
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void simulateError() throws IOException {
+            throw new IOException("An IOException simulated by " + getClass().getSimpleName());
+        }
+    }
+}
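Usage of the simulator above, as a minimal sketch: install a non-noop instance via compare-and-set, let the task hooks fire, then restore the no-op instance. Only the API shown above is used; the try block contents are hypothetical:

    // Fail the onMap() stage: bit 4 in the stage-bit layout used above.
    HadoopErrorSimulator sim = HadoopErrorSimulator.create(HadoopErrorSimulator.Kind.IOException, 4);

    assertTrue(HadoopErrorSimulator.setInstance(HadoopErrorSimulator.noopInstance, sim));

    try {
        HadoopErrorSimulator.instance().onMap(); // Throws a simulated IOException.
    }
    catch (IOException expected) {
        // The simulated failure.
    }
    finally {
        // Restore the no-op instance so later tests are unaffected.
        HadoopErrorSimulator.setInstance(sim, HadoopErrorSimulator.noopInstance);
    }
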

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopFileSystemsTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopFileSystemsTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopFileSystemsTest.java
new file mode 100644
index 0000000..946ba77
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopFileSystemsTest.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemsUtils;
+import org.apache.ignite.testframework.GridTestUtils;
+
+/**
+ * Tests file system support for per-thread working directories.
+ */
+public class HadoopFileSystemsTest extends HadoopAbstractSelfTest {
+    /** The number of threads. */
+    private static final int THREAD_COUNT = 3;
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        startGrids(gridCount());
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids(true);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean igfsEnabled() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected int gridCount() {
+        return 1;
+    }
+
+    /**
+     * Tests the file system with the specified URI for multi-threaded working directory support.
+     *
+     * @param uri Base URI of the file system (scheme and authority).
+     * @throws Exception If failed.
+     */
+    private void testFileSystem(final URI uri) throws Exception {
+        final Configuration cfg = new Configuration();
+
+        setupFileSystems(cfg);
+
+        cfg.set(HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP,
+            new Path(new Path(uri), "user/" + System.getProperty("user.name")).toString());
+
+        final CountDownLatch changeUserPhase = new CountDownLatch(THREAD_COUNT);
+        final CountDownLatch changeDirPhase = new CountDownLatch(THREAD_COUNT);
+        final CountDownLatch changeAbsDirPhase = new CountDownLatch(THREAD_COUNT);
+        final CountDownLatch finishPhase = new CountDownLatch(THREAD_COUNT);
+
+        final Path[] newUserInitWorkDir = new Path[THREAD_COUNT];
+        final Path[] newWorkDir = new Path[THREAD_COUNT];
+        final Path[] newAbsWorkDir = new Path[THREAD_COUNT];
+        final Path[] newInstanceWorkDir = new Path[THREAD_COUNT];
+
+        final AtomicInteger threadNum = new AtomicInteger(0);
+
+        GridTestUtils.runMultiThreadedAsync(new Runnable() {
+            @Override public void run() {
+                try {
+                    int curThreadNum = threadNum.getAndIncrement();
+
+                    if ("file".equals(uri.getScheme()))
+                        FileSystem.get(uri, cfg).setWorkingDirectory(new Path("file:///user/user" + curThreadNum));
+
+                    changeUserPhase.countDown();
+                    changeUserPhase.await();
+
+                    newUserInitWorkDir[curThreadNum] = FileSystem.get(uri, cfg).getWorkingDirectory();
+
+                    FileSystem.get(uri, cfg).setWorkingDirectory(new Path("folder" + curThreadNum));
+
+                    changeDirPhase.countDown();
+                    changeDirPhase.await();
+
+                    newWorkDir[curThreadNum] = FileSystem.get(uri, cfg).getWorkingDirectory();
+
+                    FileSystem.get(uri, cfg).setWorkingDirectory(new Path("/folder" + curThreadNum));
+
+                    changeAbsDirPhase.countDown();
+                    changeAbsDirPhase.await();
+
+                    newAbsWorkDir[curThreadNum] = FileSystem.get(uri, cfg).getWorkingDirectory();
+
+                    newInstanceWorkDir[curThreadNum] = FileSystem.newInstance(uri, cfg).getWorkingDirectory();
+
+                    finishPhase.countDown();
+                }
+                catch (InterruptedException | IOException e) {
+                    error("Failed to execute test thread.", e);
+
+                    fail();
+                }
+            }
+        }, THREAD_COUNT, "filesystems-test");
+
+        finishPhase.await();
+
+        for (int i = 0; i < THREAD_COUNT; i++) {
+            cfg.set(MRJobConfig.USER_NAME, "user" + i);
+
+            Path workDir = new Path(new Path(uri), "user/user" + i);
+
+            cfg.set(HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP, workDir.toString());
+
+            assertEquals(workDir, FileSystem.newInstance(uri, cfg).getWorkingDirectory());
+
+            assertEquals(workDir, newUserInitWorkDir[i]);
+
+            assertEquals(new Path(new Path(uri), "user/user" + i + "/folder" + i), newWorkDir[i]);
+
+            assertEquals(new Path("/folder" + i), newAbsWorkDir[i]);
+
+            assertEquals(new Path(new Path(uri), "user/" + System.getProperty("user.name")), newInstanceWorkDir[i]);
+        }
+
+        System.out.println(System.getProperty("user.dir"));
+    }
+
+    /**
+     * Tests the local file system multi-threaded working directory.
+     *
+     * @throws Exception If failed.
+     */
+    public void testLocal() throws Exception {
+        testFileSystem(URI.create("file:///"));
+    }
+}
\ No newline at end of file
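The behavior under test above, restated as a minimal sketch: FileSystem.get() returns a cached instance whose working directory Ignite tracks per thread (seeded from HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP), while FileSystem.newInstance() builds a fresh instance with the configured default. The paths below are hypothetical:

    Configuration cfg = new Configuration();
    URI uri = URI.create("file:///");

    FileSystem cached = FileSystem.get(uri, cfg);        // Cached, shared between get() calls.
    cached.setWorkingDirectory(new Path("file:///user/alice"));

    FileSystem fresh = FileSystem.newInstance(uri, cfg); // Fresh instance, default working dir.

    // With the Ignite wrapper, the directory change above is visible only in the
    // current thread; the fresh instance still reports the configured default.
    System.out.println(cached.getWorkingDirectory());
    System.out.println(fresh.getWorkingDirectory());
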

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopGroupingTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopGroupingTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopGroupingTest.java
new file mode 100644
index 0000000..db87e33
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopGroupingTest.java
@@ -0,0 +1,307 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Random;
+import java.util.Set;
+import java.util.UUID;
+import org.apache.hadoop.io.RawComparator;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.mapreduce.InputFormat;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.OutputFormat;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.RecordWriter;
+import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.ignite.configuration.HadoopConfiguration;
+import org.apache.ignite.internal.util.GridConcurrentHashSet;
+import org.apache.ignite.internal.util.GridRandom;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.createJobInfo;
+
+/**
+ * Grouping test.
+ */
+public class HadoopGroupingTest extends HadoopAbstractSelfTest {
+    /** */
+    private static final String PATH_OUTPUT = "/test-out";
+
+    /** */
+    private static final GridConcurrentHashSet<UUID> vals = HadoopSharedMap.map(HadoopGroupingTest.class)
+        .put("vals", new GridConcurrentHashSet<UUID>());
+
+    /** {@inheritDoc} */
+    @Override protected int gridCount() {
+        return 3;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean igfsEnabled() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        startGrids(gridCount());
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids(true);
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopConfiguration hadoopConfiguration(String gridName) {
+        HadoopConfiguration cfg = super.hadoopConfiguration(gridName);
+
+        // TODO: IGNITE-404: Uncomment when fixed.
+        //cfg.setExternalExecution(false);
+
+        return cfg;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testGroupingReducer() throws Exception {
+        doTestGrouping(false);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testGroupingCombiner() throws Exception {
+        doTestGrouping(true);
+    }
+
+    /**
+     * @param combiner Whether to use a combiner instead of a reducer.
+     * @throws Exception If failed.
+     */
+    public void doTestGrouping(boolean combiner) throws Exception {
+        vals.clear();
+
+        Job job = Job.getInstance();
+
+        job.setInputFormatClass(InFormat.class);
+        job.setOutputFormatClass(OutFormat.class);
+
+        job.setOutputKeyClass(YearTemperature.class);
+        job.setOutputValueClass(Text.class);
+
+        job.setMapperClass(Mapper.class);
+
+        if (combiner) {
+            job.setCombinerClass(MyReducer.class);
+            job.setNumReduceTasks(0);
+            job.setCombinerKeyGroupingComparatorClass(YearComparator.class);
+        }
+        else {
+            job.setReducerClass(MyReducer.class);
+            job.setNumReduceTasks(4);
+            job.setGroupingComparatorClass(YearComparator.class);
+        }
+
+        grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 2),
+            createJobInfo(job.getConfiguration())).get(30000);
+
+        assertTrue(vals.isEmpty());
+    }
+
+    /**
+     * Test reducer: verifies that values arrive grouped by year and in sorted key order.
+     */
+    public static class MyReducer extends Reducer<YearTemperature, Text, Text, Object> {
+        /** */
+        int lastYear;
+
+        @Override protected void reduce(YearTemperature key, Iterable<Text> vals0, Context context)
+            throws IOException, InterruptedException {
+            X.println("___ : " + context.getTaskAttemptID() + " --> " + key);
+
+            Set<UUID> ids = new HashSet<>();
+
+            for (Text val : vals0)
+                assertTrue(ids.add(UUID.fromString(val.toString())));
+
+            for (Text val : vals0)
+                assertTrue(ids.remove(UUID.fromString(val.toString())));
+
+            assertTrue(ids.isEmpty());
+
+            assertTrue(key.year > lastYear);
+
+            lastYear = key.year;
+
+            for (Text val : vals0)
+                assertTrue(vals.remove(UUID.fromString(val.toString())));
+        }
+    }
+
+    /**
+     * Grouping comparator: groups keys by year only.
+     */
+    public static class YearComparator implements RawComparator<YearTemperature> {
+        /** {@inheritDoc} */
+        @Override public int compare(YearTemperature o1, YearTemperature o2) {
+            return Integer.compare(o1.year, o2.year);
+        }
+
+        /** {@inheritDoc} */
+        @Override public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
+            throw new IllegalStateException();
+        }
+    }
+
+    /**
+     * Test key: partitioned and grouped by year, sorted by year, then by descending temperature.
+     */
+    public static class YearTemperature implements WritableComparable<YearTemperature>, Cloneable {
+        /** */
+        private int year;
+
+        /** */
+        private int temperature;
+
+        /** {@inheritDoc} */
+        @Override public void write(DataOutput out) throws IOException {
+            out.writeInt(year);
+            out.writeInt(temperature);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void readFields(DataInput in) throws IOException {
+            year = in.readInt();
+            temperature = in.readInt();
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean equals(Object o) {
+            throw new IllegalStateException();
+        }
+
+        /** {@inheritDoc} */
+        @Override public int hashCode() { // To be partitioned by year.
+            return year;
+        }
+
+        /** {@inheritDoc} */
+        @Override public int compareTo(YearTemperature o) {
+            int res = Integer.compare(year, o.year);
+
+            if (res != 0)
+                return res;
+
+            // Sort by descending temperature within a year, so each year's maximum comes first.
+            return Integer.compare(o.temperature, temperature);
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return S.toString(YearTemperature.class, this);
+        }
+    }
+
+    /**
+     * Test input format: generates random year/temperature records.
+     */
+    public static class InFormat extends InputFormat<YearTemperature, Text> {
+        /** {@inheritDoc} */
+        @Override public List<InputSplit> getSplits(JobContext context) throws IOException, InterruptedException {
+            ArrayList<InputSplit> list = new ArrayList<>();
+
+            for (int i = 0; i < 10; i++)
+                list.add(new HadoopSortingTest.FakeSplit(20));
+
+            return list;
+        }
+
+        /** {@inheritDoc} */
+        @Override public RecordReader<YearTemperature, Text> createRecordReader(final InputSplit split,
+            TaskAttemptContext context) throws IOException, InterruptedException {
+            return new RecordReader<YearTemperature, Text>() {
+                /** */
+                int cnt;
+
+                /** */
+                Random rnd = new GridRandom();
+
+                /** */
+                YearTemperature key = new YearTemperature();
+
+                /** */
+                Text val = new Text();
+
+                @Override public void initialize(InputSplit split, TaskAttemptContext context) {
+                    // No-op.
+                }
+
+                @Override public boolean nextKeyValue() throws IOException, InterruptedException {
+                    return cnt++ < split.getLength();
+                }
+
+                @Override public YearTemperature getCurrentKey() {
+                    key.year = 1990 + rnd.nextInt(10);
+                    key.temperature = 10 + rnd.nextInt(20);
+
+                    return key;
+                }
+
+                @Override public Text getCurrentValue() {
+                    UUID id = UUID.randomUUID();
+
+                    assertTrue(vals.add(id));
+
+                    val.set(id.toString());
+
+                    return val;
+                }
+
+                @Override public float getProgress() {
+                    return 0;
+                }
+
+                @Override public void close() {
+                    // No-op.
+                }
+            };
+        }
+    }
+
+    /**
+     * Test output format: produces no real output.
+     */
+    public static class OutFormat extends OutputFormat {
+        /** {@inheritDoc} */
+        @Override public RecordWriter getRecordWriter(TaskAttemptContext context) throws IOException, InterruptedException {
+            return null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException {
+            // No-op.
+        }
+
+        /** {@inheritDoc} */
+        @Override public OutputCommitter getOutputCommitter(TaskAttemptContext context) throws IOException, InterruptedException {
+            return null;
+        }
+    }
+}
\ No newline at end of file
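The core of the test above is that the sort order and the grouping are deliberately different: compareTo() orders by year ascending and temperature descending, while YearComparator groups by year only, so each reduce() call sees one year's values with the maximum temperature first. A minimal sketch of just that wiring, as in doTestGrouping() above:

    Job job = Job.getInstance();

    job.setOutputKeyClass(YearTemperature.class);         // compareTo(): year asc, temperature desc.

    // Reducer path: one reduce() invocation per year.
    job.setGroupingComparatorClass(YearComparator.class);

    // Combiner path would use the combiner-specific hook instead:
    // job.setCombinerKeyGroupingComparatorClass(YearComparator.class);
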

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopJobTrackerSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopJobTrackerSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopJobTrackerSelfTest.java
new file mode 100644
index 0000000..9e268b7
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopJobTrackerSelfTest.java
@@ -0,0 +1,345 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.InputFormat;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.input.FileSplit;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.ignite.configuration.HadoopConfiguration;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.IgniteKernal;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.createJobInfo;
+
+/**
+ * Job tracker self test.
+ */
+public class HadoopJobTrackerSelfTest extends HadoopAbstractSelfTest {
+    /** */
+    private static final String PATH_OUTPUT = "/test-out";
+
+    /** Test block count. */
+    private static final int BLOCK_CNT = 10;
+
+    /** */
+    private static HadoopSharedMap m = HadoopSharedMap.map(HadoopJobTrackerSelfTest.class);
+
+    /** Map task execution count. */
+    private static final AtomicInteger mapExecCnt = m.put("mapExecCnt", new AtomicInteger());
+
+    /** Reduce task execution count. */
+    private static final AtomicInteger reduceExecCnt = m.put("reduceExecCnt", new AtomicInteger());
+
+    /** Combine task execution count. */
+    private static final AtomicInteger combineExecCnt = m.put("combineExecCnt", new AtomicInteger());
+
+    /** */
+    private static final Map<String, CountDownLatch> latch = m.put("latch", new HashMap<String, CountDownLatch>());
+
+    /** {@inheritDoc} */
+    @Override protected boolean igfsEnabled() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        super.beforeTestsStarted();
+
+        startGrids(gridCount());
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        stopAllGrids();
+
+        super.afterTestsStopped();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        latch.put("mapAwaitLatch", new CountDownLatch(1));
+        latch.put("reduceAwaitLatch", new CountDownLatch(1));
+        latch.put("combineAwaitLatch", new CountDownLatch(1));
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        mapExecCnt.set(0);
+        combineExecCnt.set(0);
+        reduceExecCnt.set(0);
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopConfiguration hadoopConfiguration(String gridName) {
+        HadoopConfiguration cfg = super.hadoopConfiguration(gridName);
+
+        cfg.setMapReducePlanner(new HadoopTestRoundRobinMrPlanner());
+
+        // TODO: IGNITE-404: Uncomment when fixed.
+        //cfg.setExternalExecution(false);
+
+        return cfg;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSimpleTaskSubmit() throws Exception {
+        try {
+            UUID globalId = UUID.randomUUID();
+
+            Job job = Job.getInstance();
+            setupFileSystems(job.getConfiguration());
+
+            job.setMapperClass(TestMapper.class);
+            job.setReducerClass(TestReducer.class);
+            job.setInputFormatClass(InFormat.class);
+
+            FileOutputFormat.setOutputPath(job, new Path(igfsScheme() + PATH_OUTPUT + "1"));
+
+            HadoopJobId jobId = new HadoopJobId(globalId, 1);
+
+            grid(0).hadoop().submit(jobId, createJobInfo(job.getConfiguration()));
+
+            checkStatus(jobId, false);
+
+            info("Releasing map latch.");
+
+            latch.get("mapAwaitLatch").countDown();
+
+            checkStatus(jobId, false);
+
+            info("Releasing reduce latch.");
+
+            latch.get("reduceAwaitLatch").countDown();
+
+            checkStatus(jobId, true);
+
+            assertEquals(10, mapExecCnt.get());
+            assertEquals(0, combineExecCnt.get());
+            assertEquals(1, reduceExecCnt.get());
+        }
+        finally {
+            // Safety.
+            latch.get("mapAwaitLatch").countDown();
+            latch.get("combineAwaitLatch").countDown();
+            latch.get("reduceAwaitLatch").countDown();
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testTaskWithCombinerPerMap() throws Exception {
+        try {
+            UUID globalId = UUID.randomUUID();
+
+            Job job = Job.getInstance();
+            setupFileSystems(job.getConfiguration());
+
+            job.setMapperClass(TestMapper.class);
+            job.setReducerClass(TestReducer.class);
+            job.setCombinerClass(TestCombiner.class);
+            job.setInputFormatClass(InFormat.class);
+
+            FileOutputFormat.setOutputPath(job, new Path(igfsScheme() + PATH_OUTPUT + "2"));
+
+            HadoopJobId jobId = new HadoopJobId(globalId, 1);
+
+            grid(0).hadoop().submit(jobId, createJobInfo(job.getConfiguration()));
+
+            checkStatus(jobId, false);
+
+            info("Releasing map latch.");
+
+            latch.get("mapAwaitLatch").countDown();
+
+            checkStatus(jobId, false);
+
+            // All maps are completed. We have a combiner, so no reducers should be executed
+            // before combiner latch is released.
+
+            U.sleep(50);
+
+            assertEquals(0, reduceExecCnt.get());
+
+            info("Releasing combiner latch.");
+
+            latch.get("combineAwaitLatch").countDown();
+
+            checkStatus(jobId, false);
+
+            info("Releasing reduce latch.");
+
+            latch.get("reduceAwaitLatch").countDown();
+
+            checkStatus(jobId, true);
+
+            assertEquals(10, mapExecCnt.get());
+            assertEquals(10, combineExecCnt.get());
+            assertEquals(1, reduceExecCnt.get());
+        }
+        finally {
+            // Safety.
+            latch.get("mapAwaitLatch").countDown();
+            latch.get("combineAwaitLatch").countDown();
+            latch.get("reduceAwaitLatch").countDown();
+        }
+    }
+
+    /**
+     * Checks job execution status.
+     *
+     * @param jobId Job ID.
+     * @param complete Completion status.
+     * @throws Exception If failed.
+     */
+    private void checkStatus(HadoopJobId jobId, boolean complete) throws Exception {
+        for (int i = 0; i < gridCount(); i++) {
+            IgniteKernal kernal = (IgniteKernal)grid(i);
+
+            Hadoop hadoop = kernal.hadoop();
+
+            HadoopJobStatus stat = hadoop.status(jobId);
+
+            assert stat != null;
+
+            IgniteInternalFuture<?> fut = hadoop.finishFuture(jobId);
+
+            if (!complete)
+                assertFalse(fut.isDone());
+            else {
+                info("Waiting for status future completion on node [idx=" + i + ", nodeId=" +
+                    kernal.getLocalNodeId() + ']');
+
+                fut.get();
+            }
+        }
+    }
+
+    /**
+     * Test input format.
+     */
+    public static class InFormat extends InputFormat {
+        /** {@inheritDoc} */
+        @Override public List<InputSplit> getSplits(JobContext ctx) throws IOException, InterruptedException {
+            List<InputSplit> res = new ArrayList<>(BLOCK_CNT);
+
+            for (int i = 0; i < BLOCK_CNT; i++)
+                try {
+                    res.add(new FileSplit(new Path(new URI("someFile")), i, i + 1, new String[] {"localhost"}));
+                }
+                catch (URISyntaxException e) {
+                    throw new IOException(e);
+                }
+
+            return res;
+        }
+
+        /** {@inheritDoc} */
+        @Override public RecordReader createRecordReader(InputSplit split, TaskAttemptContext ctx)
+            throws IOException, InterruptedException {
+            return new RecordReader() {
+                @Override public void initialize(InputSplit split, TaskAttemptContext ctx) {
+                }
+
+                @Override public boolean nextKeyValue() {
+                    return false;
+                }
+
+                @Override public Object getCurrentKey() {
+                    return null;
+                }
+
+                @Override public Object getCurrentValue() {
+                    return null;
+                }
+
+                @Override public float getProgress() {
+                    return 0;
+                }
+
+                @Override public void close() {
+                    // No-op.
+                }
+            };
+        }
+    }
+
+    /**
+     * Test mapper.
+     */
+    private static class TestMapper extends Mapper {
+        @Override public void run(Context ctx) throws IOException, InterruptedException {
+            System.out.println("Running task: " + ctx.getTaskAttemptID().getTaskID().getId());
+
+            latch.get("mapAwaitLatch").await();
+
+            mapExecCnt.incrementAndGet();
+
+            System.out.println("Completed task: " + ctx.getTaskAttemptID().getTaskID().getId());
+        }
+    }
+
+    /**
+     * Test reducer.
+     */
+    private static class TestReducer extends Reducer {
+        @Override public void run(Context ctx) throws IOException, InterruptedException {
+            System.out.println("Running task: " + ctx.getTaskAttemptID().getTaskID().getId());
+
+            latch.get("reduceAwaitLatch").await();
+
+            reduceExecCnt.incrementAndGet();
+
+            System.out.println("Completed task: " + ctx.getTaskAttemptID().getTaskID().getId());
+        }
+    }
+
+    /**
+     * Test combiner.
+     */
+    private static class TestCombiner extends Reducer {
+        @Override public void run(Context ctx) throws IOException, InterruptedException {
+            System.out.println("Running task: " + ctx.getTaskAttemptID().getTaskID().getId());
+
+            latch.get("combineAwaitLatch").await();
+
+            combineExecCnt.incrementAndGet();
+
+            System.out.println("Completed task: " + ctx.getTaskAttemptID().getTaskID().getId());
+        }
+    }
+}
\ No newline at end of file
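The phase control in the test above, reduced to its skeleton: every task blocks on a shared latch, so the test can assert intermediate counters before releasing the next phase. A minimal sketch, assuming a job already submitted under jobId (names as in the test):

    // No mapper has run while the map latch is held.
    assertEquals(0, mapExecCnt.get());

    latch.get("mapAwaitLatch").countDown();      // Unblock all mappers.
    latch.get("combineAwaitLatch").countDown();  // Unblock combiners, if any.
    latch.get("reduceAwaitLatch").countDown();   // Unblock reducers.

    // With every phase released, the job future completes.
    grid(0).hadoop().finishFuture(jobId).get();
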

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceEmbeddedSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceEmbeddedSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceEmbeddedSelfTest.java
new file mode 100644
index 0000000..25ef382
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceEmbeddedSelfTest.java
@@ -0,0 +1,253 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.UUID;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.serializer.WritableSerialization;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobConfigurable;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.ignite.configuration.HadoopConfiguration;
+import org.apache.ignite.igfs.IgfsPath;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.hadoop.examples.HadoopWordCount1;
+import org.apache.ignite.internal.processors.hadoop.examples.HadoopWordCount2;
+
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.createJobInfo;
+
+/**
+ * Tests map-reduce execution with embedded mode.
+ */
+public class HadoopMapReduceEmbeddedSelfTest extends HadoopMapReduceTest {
+    /** */
+    private static Map<String, Boolean> flags = HadoopSharedMap.map(HadoopMapReduceEmbeddedSelfTest.class)
+        .put("flags", new HashMap<String, Boolean>());
+
+    /** {@inheritDoc} */
+    @Override public HadoopConfiguration hadoopConfiguration(String gridName) {
+        HadoopConfiguration cfg = super.hadoopConfiguration(gridName);
+
+        // TODO: IGNITE-404: Uncomment when fixed.
+        //cfg.setExternalExecution(false);
+
+        return cfg;
+    }
+
+    /**
+     * Tests whole job execution with all phases, in both the old and new API versions,
+     * with custom Serialization, Partitioner and IO formats defined.
+     * @throws Exception If failed.
+     */
+    public void testMultiReducerWholeMapReduceExecution() throws Exception {
+        IgfsPath inDir = new IgfsPath(PATH_INPUT);
+
+        igfs.mkdirs(inDir);
+
+        IgfsPath inFile = new IgfsPath(inDir, HadoopWordCount2.class.getSimpleName() + "-input");
+
+        generateTestFile(inFile.toString(), "key1", 10000, "key2", 20000, "key3", 15000, "key4", 7000, "key5", 12000,
+            "key6", 18000 );
+
+        for (int i = 0; i < 2; i++) {
+            boolean useNewAPI = i == 1;
+
+            igfs.delete(new IgfsPath(PATH_OUTPUT), true);
+
+            flags.put("serializationWasConfigured", false);
+            flags.put("partitionerWasConfigured", false);
+            flags.put("inputFormatWasConfigured", false);
+            flags.put("outputFormatWasConfigured", false);
+
+            JobConf jobConf = new JobConf();
+
+            jobConf.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, CustomSerialization.class.getName());
+
+            // To split into about 6-7 items for v2.
+            jobConf.setInt(FileInputFormat.SPLIT_MAXSIZE, 65000);
+
+            // For v1.
+            jobConf.setInt("fs.local.block.size", 65000);
+
+            // File system coordinates.
+            setupFileSystems(jobConf);
+
+            HadoopWordCount1.setTasksClasses(jobConf, !useNewAPI, !useNewAPI, !useNewAPI);
+
+            if (!useNewAPI) {
+                jobConf.setPartitionerClass(CustomV1Partitioner.class);
+                jobConf.setInputFormat(CustomV1InputFormat.class);
+                jobConf.setOutputFormat(CustomV1OutputFormat.class);
+            }
+
+            Job job = Job.getInstance(jobConf);
+
+            HadoopWordCount2.setTasksClasses(job, useNewAPI, useNewAPI, useNewAPI, false);
+
+            if (useNewAPI) {
+                job.setPartitionerClass(CustomV2Partitioner.class);
+                job.setInputFormatClass(CustomV2InputFormat.class);
+                job.setOutputFormatClass(CustomV2OutputFormat.class);
+            }
+
+            job.setOutputKeyClass(Text.class);
+            job.setOutputValueClass(IntWritable.class);
+
+            FileInputFormat.setInputPaths(job, new Path(igfsScheme() + inFile.toString()));
+            FileOutputFormat.setOutputPath(job, new Path(igfsScheme() + PATH_OUTPUT));
+
+            job.setNumReduceTasks(3);
+
+            job.setJarByClass(HadoopWordCount2.class);
+
+            IgniteInternalFuture<?> fut = grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 1),
+                    createJobInfo(job.getConfiguration()));
+
+            fut.get();
+
+            assertTrue("Serialization was configured (new API is " + useNewAPI + ")",
+                 flags.get("serializationWasConfigured"));
+
+            assertTrue("Partitioner was configured (new API is = " + useNewAPI + ")",
+                 flags.get("partitionerWasConfigured"));
+
+            assertTrue("Input format was configured (new API is = " + useNewAPI + ")",
+                 flags.get("inputFormatWasConfigured"));
+
+            assertTrue("Output format was configured (new API is = " + useNewAPI + ")",
+                 flags.get("outputFormatWasConfigured"));
+
+            assertEquals("Use new API = " + useNewAPI,
+                "key3\t15000\n" +
+                "key6\t18000\n",
+                readAndSortFile(PATH_OUTPUT + "/" + (useNewAPI ? "part-r-" : "part-") + "00000")
+            );
+
+            assertEquals("Use new API = " + useNewAPI,
+                "key1\t10000\n" +
+                "key4\t7000\n",
+                readAndSortFile(PATH_OUTPUT + "/" + (useNewAPI ? "part-r-" : "part-") + "00001")
+            );
+
+            assertEquals("Use new API = " + useNewAPI,
+                "key2\t20000\n" +
+                "key5\t12000\n",
+                readAndSortFile(PATH_OUTPUT + "/" + (useNewAPI ? "part-r-" : "part-") + "00002")
+            );
+        }
+    }
+
+    /**
+     * Custom serialization class that inherits behaviour of native {@link WritableSerialization}.
+     */
+    protected static class CustomSerialization extends WritableSerialization {
+        @Override public void setConf(Configuration conf) {
+            super.setConf(conf);
+
+            flags.put("serializationWasConfigured", true);
+        }
+    }
+
+    /**
+     * Custom implementation of Partitioner in v1 API.
+     */
+    private static class CustomV1Partitioner extends org.apache.hadoop.mapred.lib.HashPartitioner {
+        /** {@inheritDoc} */
+        @Override public void configure(JobConf job) {
+            flags.put("partitionerWasConfigured", true);
+        }
+    }
+
+    /**
+     * Custom implementation of Partitioner in v2 API.
+     */
+    private static class CustomV2Partitioner extends org.apache.hadoop.mapreduce.lib.partition.HashPartitioner
+            implements Configurable {
+        /** {@inheritDoc} */
+        @Override public void setConf(Configuration conf) {
+            flags.put("partitionerWasConfigured", true);
+        }
+
+        /** {@inheritDoc} */
+        @Override public Configuration getConf() {
+            return null;
+        }
+    }
+
+    /**
+     * Custom implementation of InputFormat in v2 API.
+     */
+    private static class CustomV2InputFormat extends org.apache.hadoop.mapreduce.lib.input.TextInputFormat implements Configurable {
+        /** {@inheritDoc} */
+        @Override public void setConf(Configuration conf) {
+            flags.put("inputFormatWasConfigured", true);
+        }
+
+        /** {@inheritDoc} */
+        @Override public Configuration getConf() {
+            return null;
+        }
+    }
+
+    /**
+     * Custom implementation of OutputFormat in v2 API.
+     */
+    private static class CustomV2OutputFormat extends org.apache.hadoop.mapreduce.lib.output.TextOutputFormat implements Configurable {
+        /** {@inheritDoc} */
+        @Override public void setConf(Configuration conf) {
+            flags.put("outputFormatWasConfigured", true);
+        }
+
+        /** {@inheritDoc} */
+        @Override public Configuration getConf() {
+            return null;
+        }
+    }
+
+    /**
+     * Custom implementation of InputFormat in v1 API.
+     */
+    private static class CustomV1InputFormat extends org.apache.hadoop.mapred.TextInputFormat {
+        /** {@inheritDoc} */
+        @Override public void configure(JobConf job) {
+            super.configure(job);
+
+            flags.put("inputFormatWasConfigured", true);
+        }
+    }
+
+    /**
+     * Custom implementation of OutputFormat in v1 API.
+     */
+    private static class CustomV1OutputFormat extends org.apache.hadoop.mapred.TextOutputFormat implements JobConfigurable {
+        /** {@inheritDoc} */
+        @Override public void configure(JobConf job) {
+            flags.put("outputFormatWasConfigured", true);
+        }
+    }
+}
\ No newline at end of file
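A note on the two split knobs set above: the new (mapreduce) API caps splits via FileInputFormat.SPLIT_MAXSIZE, while the old (mapred) API derives split sizes from the file system block size, hence the separate fs.local.block.size setting. The same configuration in isolation, as a sketch:

    JobConf jobConf = new JobConf();

    // v2 input formats: cap each split at roughly 65 KB.
    jobConf.setInt(FileInputFormat.SPLIT_MAXSIZE, 65000);

    // v1 input formats: splits follow the local file system block size.
    jobConf.setInt("fs.local.block.size", 65000);
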

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceErrorResilienceTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceErrorResilienceTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceErrorResilienceTest.java
new file mode 100644
index 0000000..dd12935
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceErrorResilienceTest.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import org.apache.ignite.igfs.IgfsPath;
+import org.apache.ignite.internal.processors.hadoop.examples.HadoopWordCount2;
+
+/**
+ * Test of error resilience after an error in a map-reduce job execution.
+ * Combinations tested:
+ * { new API, old API }
+ *   x { unchecked exception, checked exception, error }
+ *   x { phase where the error happens }.
+ */
+public class HadoopMapReduceErrorResilienceTest extends HadoopAbstractMapReduceTest {
+    /**
+     * Tests recovery.
+     *
+     * @throws Exception If failed.
+     */
+    public void testRecoveryAfterAnError0_Runtime() throws Exception {
+        doTestRecoveryAfterAnError(0, HadoopErrorSimulator.Kind.Runtime);
+    }
+
+    /**
+     * Tests recovery.
+     *
+     * @throws Exception If failed.
+     */
+    public void testRecoveryAfterAnError0_IOException() throws Exception {
+        doTestRecoveryAfterAnError(0, HadoopErrorSimulator.Kind.IOException);
+    }
+
+    /**
+     * Tests recovery.
+     *
+     * @throws Exception If failed.
+     */
+    public void testRecoveryAfterAnError0_Error() throws Exception {
+        doTestRecoveryAfterAnError(0, HadoopErrorSimulator.Kind.Error);
+    }
+
+    /**
+     * Tests recovery.
+     *
+     * @throws Exception If failed.
+     */
+    public void testRecoveryAfterAnError7_Runtime() throws Exception {
+        doTestRecoveryAfterAnError(7, HadoopErrorSimulator.Kind.Runtime);
+    }
+
+    /**
+     * Tests recovery.
+     *
+     * @throws Exception If failed.
+     */
+    public void testRecoveryAfterAnError7_IOException() throws Exception {
+        doTestRecoveryAfterAnError(7, HadoopErrorSimulator.Kind.IOException);
+    }
+
+    /**
+     * Tests recovery.
+     *
+     * @throws Exception If failed.
+     */
+    public void testRecoveryAfterAnError7_Error() throws Exception {
+        doTestRecoveryAfterAnError(7, HadoopErrorSimulator.Kind.Error);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected long getTestTimeout() {
+        return 10 * 60 * 1000L;
+    }
+
+    /**
+     * Tests correct operation after a simulated error.
+     *
+     * @param useNewBits Bit mask selecting the API per phase: a clear bit selects the new API
+     *      for the mapper (bit 1), combiner (bit 2) and reducer (bit 4).
+     * @param simulatorKind Kind of error to simulate.
+     * @throws Exception On error.
+     */
+    private void doTestRecoveryAfterAnError(int useNewBits, HadoopErrorSimulator.Kind simulatorKind) throws Exception {
+        try {
+            IgfsPath inDir = new IgfsPath(PATH_INPUT);
+
+            igfs.mkdirs(inDir);
+
+            IgfsPath inFile = new IgfsPath(inDir, HadoopWordCount2.class.getSimpleName() + "-input");
+
+            generateTestFile(inFile.toString(), "red", red, "blue", blue, "green", green, "yellow", yellow);
+
+            boolean useNewMapper = (useNewBits & 1) == 0;
+            boolean useNewCombiner = (useNewBits & 2) == 0;
+            boolean useNewReducer = (useNewBits & 4) == 0;
+
+            for (int i = 0; i < 12; i++) {
+                int bits = 1 << i;
+
+                System.out.println("############################ Simulator kind = " + simulatorKind
+                    + ", Stage bits = " + bits);
+
+                HadoopErrorSimulator sim = HadoopErrorSimulator.create(simulatorKind, bits);
+
+                doTestWithErrorSimulator(sim, inFile, useNewMapper, useNewCombiner, useNewReducer);
+            }
+        }
+        catch (Throwable t) {
+            t.printStackTrace();
+
+            fail("Unexpected throwable: " + t);
+        }
+    }
+
+    /**
+     * Performs test with given error simulator.
+     *
+     * @param sim The simulator.
+     * @param inFile Input file.
+     * @param useNewMapper Whether to use the new mapper API.
+     * @param useNewCombiner Whether to use the new combiner API.
+     * @param useNewReducer Whether to use the new reducer API.
+     * @throws Exception If failed.
+     */
+    private void doTestWithErrorSimulator(HadoopErrorSimulator sim, IgfsPath inFile, boolean useNewMapper,
+        boolean useNewCombiner, boolean useNewReducer) throws Exception {
+        // Install the error-simulating instance:
+        assertTrue(HadoopErrorSimulator.setInstance(HadoopErrorSimulator.noopInstance, sim));
+
+        try {
+            // Expect failure there:
+            doTest(inFile, useNewMapper, useNewCombiner, useNewReducer);
+        }
+        catch (Throwable t) { // This may be an Error.
+            // Expected:
+            System.out.println(t.toString()); // Ignore, continue the test.
+        }
+
+        // Set no-op error simulator:
+        assertTrue(HadoopErrorSimulator.setInstance(sim, HadoopErrorSimulator.noopInstance));
+
+        // Expect success there:
+        doTest(inFile, useNewMapper, useNewCombiner, useNewReducer);
+    }
+}
\ No newline at end of file
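For reference, the bit decoding performed inside doTestRecoveryAfterAnError() above: a clear bit selects the new API and a set bit the old one, so 0 runs all-new and 7 all-old. The three flags, spelled out:

    int useNewBits = 7;                              // 0 -> all new API, 7 -> all old API.

    boolean useNewMapper   = (useNewBits & 1) == 0;  // Bit 1: mapper.
    boolean useNewCombiner = (useNewBits & 2) == 0;  // Bit 2: combiner.
    boolean useNewReducer  = (useNewBits & 4) == 0;  // Bit 4: reducer.
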

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceTest.java
new file mode 100644
index 0000000..b703896
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceTest.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import org.apache.ignite.igfs.IgfsPath;
+import org.apache.ignite.internal.processors.hadoop.examples.HadoopWordCount2;
+
+/**
+ * Test of whole cycle of map-reduce processing via Job tracker.
+ */
+public class HadoopMapReduceTest extends HadoopAbstractMapReduceTest {
+    /**
+     * Tests whole job execution with all phases in all combinations of the new and old API versions.
+     * @throws Exception If failed.
+     */
+    public void testWholeMapReduceExecution() throws Exception {
+        IgfsPath inDir = new IgfsPath(PATH_INPUT);
+
+        igfs.mkdirs(inDir);
+
+        IgfsPath inFile = new IgfsPath(inDir, HadoopWordCount2.class.getSimpleName() + "-input");
+
+        generateTestFile(inFile.toString(), "red", red, "blue", blue, "green", green, "yellow", yellow);
+
+        for (boolean[] apiMode : getApiModes()) {
+            assert apiMode.length == 3;
+
+            boolean useNewMapper = apiMode[0];
+            boolean useNewCombiner = apiMode[1];
+            boolean useNewReducer = apiMode[2];
+
+            doTest(inFile, useNewMapper, useNewCombiner, useNewReducer);
+        }
+    }
+
+    /**
+     * Gets API mode combinations to be tested.
+     * Each boolean[] is a { newMapper, newCombiner, newReducer } flag triplet.
+     *
+     * @return Arrays of booleans indicating API combinations to test.
+     */
+    protected boolean[][] getApiModes() {
+        return new boolean[][] {
+            { false, false, false },
+            { false, false, true },
+            { false, true,  false },
+            { true,  false, false },
+            { true,  true,  true },
+        };
+    }
+}
\ No newline at end of file
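Because getApiModes() is protected, a subclass can reshape the API matrix without duplicating the test body. A minimal sketch (the subclass name is hypothetical):

    /** Hypothetical variant running only the pure new-API combination. */
    public class HadoopNewApiMapReduceTest extends HadoopMapReduceTest {
        /** {@inheritDoc} */
        @Override protected boolean[][] getApiModes() {
            return new boolean[][] {
                { true, true, true },
            };
        }
    }
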

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopNoHadoopMapReduceTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopNoHadoopMapReduceTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopNoHadoopMapReduceTest.java
new file mode 100644
index 0000000..0c172c3
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopNoHadoopMapReduceTest.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop;
+
+import org.apache.ignite.configuration.IgniteConfiguration;
+
+/**
+ * Tests an attempt to execute a map-reduce task when no Hadoop processor is available.
+ */
+public class HadoopNoHadoopMapReduceTest extends HadoopMapReduceTest {
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration c = super.getConfiguration(gridName);
+
+        c.setHadoopConfiguration(null);
+        c.setPeerClassLoadingEnabled(true);
+
+        return c;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void testWholeMapReduceExecution() throws Exception {
+        try {
+            super.testWholeMapReduceExecution();
+
+            fail("IllegalStateException expected.");
+        }
+        catch (IllegalStateException ignore) {
+            // No-op.
+        }
+    }
+}
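
The try/fail/catch idiom above is the plain-JUnit way to assert the expected failure; the
same check could also be written with GridTestUtils.assertThrows from the Ignite test
framework, as in this sketch (requires java.util.concurrent.Callable and
org.apache.ignite.testframework.GridTestUtils on the test classpath):

    /** {@inheritDoc} */
    @Override public void testWholeMapReduceExecution() throws Exception {
        // Submission must fail fast because HadoopConfiguration is null on every node.
        GridTestUtils.assertThrows(log, new Callable<Object>() {
            @Override public Object call() throws Exception {
                HadoopNoHadoopMapReduceTest.super.testWholeMapReduceExecution();

                return null;
            }
        }, IllegalStateException.class, null);
    }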


[03/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedPrimarySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedPrimarySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedPrimarySelfTest.java
deleted file mode 100644
index 214c2a8..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedPrimarySelfTest.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
-
-/**
- * IGFS Hadoop file system IPC shmem self test in PRIMARY mode.
- */
-public class IgniteHadoopFileSystemShmemEmbeddedPrimarySelfTest
-    extends IgniteHadoopFileSystemShmemAbstractSelfTest {
-    /**
-     * Constructor.
-     */
-    public IgniteHadoopFileSystemShmemEmbeddedPrimarySelfTest() {
-        super(PRIMARY, false);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedSecondarySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedSecondarySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedSecondarySelfTest.java
deleted file mode 100644
index d7f34a1..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedSecondarySelfTest.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.igfs.IgfsMode.PROXY;
-
-/**
- * IGFS Hadoop file system IPC shmem self test in SECONDARY mode.
- */
-public class IgniteHadoopFileSystemShmemEmbeddedSecondarySelfTest
-    extends IgniteHadoopFileSystemShmemAbstractSelfTest {
-    /**
-     * Constructor.
-     */
-    public IgniteHadoopFileSystemShmemEmbeddedSecondarySelfTest() {
-        super(PROXY, false);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualAsyncSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualAsyncSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualAsyncSelfTest.java
deleted file mode 100644
index 0435eaa..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualAsyncSelfTest.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.igfs.IgfsMode.DUAL_ASYNC;
-
-/**
- * IGFS Hadoop file system IPC shmem self test in DUAL_ASYNC mode.
- */
-public class IgniteHadoopFileSystemShmemExternalDualAsyncSelfTest
-    extends IgniteHadoopFileSystemShmemAbstractSelfTest {
-    /**
-     * Constructor.
-     */
-    public IgniteHadoopFileSystemShmemExternalDualAsyncSelfTest() {
-        super(DUAL_ASYNC, true);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualSyncSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualSyncSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualSyncSelfTest.java
deleted file mode 100644
index 3af7274..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualSyncSelfTest.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.igfs.IgfsMode.DUAL_SYNC;
-
-/**
- * IGFS Hadoop file system IPC shmem self test in DUAL_SYNC mode.
- */
-public class IgniteHadoopFileSystemShmemExternalDualSyncSelfTest
-    extends IgniteHadoopFileSystemShmemAbstractSelfTest {
-    /**
-     * Constructor.
-     */
-    public IgniteHadoopFileSystemShmemExternalDualSyncSelfTest() {
-        super(DUAL_SYNC, true);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalPrimarySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalPrimarySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalPrimarySelfTest.java
deleted file mode 100644
index ce9dbd9..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalPrimarySelfTest.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
-
-/**
- * IGFS Hadoop file system IPC shmem self test in PRIMARY mode.
- */
-public class IgniteHadoopFileSystemShmemExternalPrimarySelfTest
-    extends IgniteHadoopFileSystemShmemAbstractSelfTest {
-    /**
-     * Constructor.
-     */
-    public IgniteHadoopFileSystemShmemExternalPrimarySelfTest() {
-        super(PRIMARY, true);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalSecondarySelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalSecondarySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalSecondarySelfTest.java
deleted file mode 100644
index bc8c182..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalSecondarySelfTest.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.igfs.IgfsMode.PROXY;
-
-/**
- * IGFS Hadoop file system IPC shmem self test in SECONDARY mode.
- */
-public class IgniteHadoopFileSystemShmemExternalSecondarySelfTest
-    extends IgniteHadoopFileSystemShmemAbstractSelfTest {
-    /**
-     * Constructor.
-     */
-    public IgniteHadoopFileSystemShmemExternalSecondarySelfTest() {
-        super(PROXY, true);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractMapReduceTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractMapReduceTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractMapReduceTest.java
deleted file mode 100644
index 3731213..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractMapReduceTest.java
+++ /dev/null
@@ -1,429 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.UUID;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.cache.CacheWriteSynchronizationMode;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.configuration.FileSystemConfiguration;
-import org.apache.ignite.configuration.HadoopConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.hadoop.fs.IgniteHadoopFileSystemCounterWriter;
-import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
-import org.apache.ignite.igfs.IgfsFile;
-import org.apache.ignite.igfs.IgfsGroupDataBlocksKeyMapper;
-import org.apache.ignite.igfs.IgfsIpcEndpointConfiguration;
-import org.apache.ignite.igfs.IgfsMode;
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.igfs.IgfsUserContext;
-import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystem;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopPerformanceCounter;
-import org.apache.ignite.internal.processors.hadoop.examples.HadoopWordCount1;
-import org.apache.ignite.internal.processors.hadoop.examples.HadoopWordCount2;
-import org.apache.ignite.internal.processors.igfs.IgfsEx;
-import org.apache.ignite.internal.processors.igfs.IgfsUtils;
-import org.apache.ignite.internal.util.lang.GridAbsPredicate;
-import org.apache.ignite.internal.util.typedef.G;
-import org.apache.ignite.internal.util.typedef.T2;
-import org.apache.ignite.lang.IgniteOutClosure;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
-import org.apache.ignite.testframework.GridTestUtils;
-import org.jetbrains.annotations.Nullable;
-
-import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
-import static org.apache.ignite.cache.CacheMode.PARTITIONED;
-import static org.apache.ignite.cache.CacheMode.REPLICATED;
-import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
-import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.JOB_COUNTER_WRITER_PROPERTY;
-import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.createJobInfo;
-
-/**
- * Abstract test of whole cycle of map-reduce processing via Job tracker.
- */
-public class HadoopAbstractMapReduceTest extends HadoopAbstractWordCountTest {
-    /** IGFS block size. */
-    protected static final int IGFS_BLOCK_SIZE = 512 * 1024;
-
-    /** Amount of blocks to prefetch. */
-    protected static final int PREFETCH_BLOCKS = 1;
-
-    /** Amount of sequential block reads before prefetch is triggered. */
-    protected static final int SEQ_READS_BEFORE_PREFETCH = 2;
-
-    /** Secondary file system URI. */
-    protected static final String SECONDARY_URI = "igfs://igfs-secondary:grid-secondary@127.0.0.1:11500/";
-
-    /** Secondary file system configuration path. */
-    protected static final String SECONDARY_CFG = "modules/core/src/test/config/hadoop/core-site-loopback-secondary.xml";
-
-    /** The user to run Hadoop job on behalf of. */
-    protected static final String USER = "vasya";
-
-    /** Secondary IGFS name. */
-    protected static final String SECONDARY_IGFS_NAME = "igfs-secondary";
-
-    /** Red constant. */
-    protected static final int red = 10_000;
-
-    /** Blue constant. */
-    protected static final int blue = 20_000;
-
-    /** Green constant. */
-    protected static final int green = 15_000;
-
-    /** Yellow constant. */
-    protected static final int yellow = 7_000;
-
-    /** The secondary Ignite node. */
-    protected Ignite igniteSecondary;
-
-    /** The secondary Fs. */
-    protected IgfsSecondaryFileSystem secondaryFs;
-
-    /** {@inheritDoc} */
-    @Override protected int gridCount() {
-        return 3;
-    }
-
-    /**
-     * Gets owner of a IgfsEx path.
-     * @param p The path.
-     * @return The owner.
-     */
-    private static String getOwner(final IgfsEx i, final IgfsPath p) {
-        return IgfsUserContext.doAs(USER, new IgniteOutClosure<String>() {
-            @Override public String apply() {
-                IgfsFile f = i.info(p);
-
-                assert f != null;
-
-                return f.property(IgfsUtils.PROP_USER_NAME);
-            }
-        });
-    }
-
-    /**
-     * Gets owner of a secondary Fs path.
-     * @param secFs The sec Fs.
-     * @param p The path.
-     * @return The owner.
-     */
-    private static String getOwnerSecondary(final IgfsSecondaryFileSystem secFs, final IgfsPath p) {
-        return IgfsUserContext.doAs(USER, new IgniteOutClosure<String>() {
-            @Override public String apply() {
-                return secFs.info(p).property(IgfsUtils.PROP_USER_NAME);
-            }
-        });
-    }
-
-    /**
-     * Checks owner of the path.
-     * @param p The path.
-     */
-    private void checkOwner(IgfsPath p) {
-        String ownerPrim = getOwner(igfs, p);
-        assertEquals(USER, ownerPrim);
-
-        String ownerSec = getOwnerSecondary(secondaryFs, p);
-        assertEquals(USER, ownerSec);
-    }
-
-    /**
-     * Does actual test job
-     *
-     * @param useNewMapper flag to use new mapper API.
-     * @param useNewCombiner flag to use new combiner API.
-     * @param useNewReducer flag to use new reducer API.
-     */
-    protected final void doTest(IgfsPath inFile, boolean useNewMapper, boolean useNewCombiner, boolean useNewReducer)
-        throws Exception {
-        igfs.delete(new IgfsPath(PATH_OUTPUT), true);
-
-        JobConf jobConf = new JobConf();
-
-        jobConf.set(JOB_COUNTER_WRITER_PROPERTY, IgniteHadoopFileSystemCounterWriter.class.getName());
-        jobConf.setUser(USER);
-        jobConf.set(IgniteHadoopFileSystemCounterWriter.COUNTER_WRITER_DIR_PROPERTY, "/xxx/${USER}/zzz");
-
-        //To split into about 40 items for v2
-        jobConf.setInt(FileInputFormat.SPLIT_MAXSIZE, 65000);
-
-        //For v1
-        jobConf.setInt("fs.local.block.size", 65000);
-
-        // File system coordinates.
-        setupFileSystems(jobConf);
-
-        HadoopWordCount1.setTasksClasses(jobConf, !useNewMapper, !useNewCombiner, !useNewReducer);
-
-        Job job = Job.getInstance(jobConf);
-
-        HadoopWordCount2.setTasksClasses(job, useNewMapper, useNewCombiner, useNewReducer, compressOutputSnappy());
-
-        job.setOutputKeyClass(Text.class);
-        job.setOutputValueClass(IntWritable.class);
-
-        FileInputFormat.setInputPaths(job, new Path(igfsScheme() + inFile.toString()));
-        FileOutputFormat.setOutputPath(job, new Path(igfsScheme() + PATH_OUTPUT));
-
-        job.setJarByClass(HadoopWordCount2.class);
-
-        HadoopJobId jobId = new HadoopJobId(UUID.randomUUID(), 1);
-
-        IgniteInternalFuture<?> fut = grid(0).hadoop().submit(jobId, createJobInfo(job.getConfiguration()));
-
-        fut.get();
-
-        checkJobStatistics(jobId);
-
-        final String outFile = PATH_OUTPUT + "/" + (useNewReducer ? "part-r-" : "part-") + "00000";
-
-        checkOwner(new IgfsPath(PATH_OUTPUT + "/" + "_SUCCESS"));
-
-        checkOwner(new IgfsPath(outFile));
-
-        String actual = readAndSortFile(outFile, job.getConfiguration());
-
-        assertEquals("Use new mapper: " + useNewMapper + ", new combiner: " + useNewCombiner + ", new reducer: " +
-                useNewReducer,
-            "blue\t" + blue + "\n" +
-                "green\t" + green + "\n" +
-                "red\t" + red + "\n" +
-                "yellow\t" + yellow + "\n",
-            actual
-        );
-    }
-
-    /**
-     * Gets if to compress output data with Snappy.
-     *
-     * @return If to compress output data with Snappy.
-     */
-    protected boolean compressOutputSnappy() {
-        return false;
-    }
-
-    /**
-     * Simple test job statistics.
-     *
-     * @param jobId Job id.
-     * @throws IgniteCheckedException
-     */
-    private void checkJobStatistics(HadoopJobId jobId) throws IgniteCheckedException, IOException {
-        HadoopCounters cntrs = grid(0).hadoop().counters(jobId);
-
-        HadoopPerformanceCounter perfCntr = HadoopPerformanceCounter.getCounter(cntrs, null);
-
-        Map<String, SortedMap<Integer,Long>> tasks = new TreeMap<>();
-
-        Map<String, Integer> phaseOrders = new HashMap<>();
-        phaseOrders.put("submit", 0);
-        phaseOrders.put("prepare", 1);
-        phaseOrders.put("start", 2);
-        phaseOrders.put("Cstart", 3);
-        phaseOrders.put("finish", 4);
-
-        String prevTaskId = null;
-
-        long apiEvtCnt = 0;
-
-        for (T2<String, Long> evt : perfCntr.evts()) {
-            //We expect string pattern: COMBINE 1 run 7fa86a14-5a08-40e3-a7cb-98109b52a706
-            String[] parsedEvt = evt.get1().split(" ");
-
-            String taskId;
-            String taskPhase;
-
-            if ("JOB".equals(parsedEvt[0])) {
-                taskId = parsedEvt[0];
-                taskPhase = parsedEvt[1];
-            }
-            else {
-                taskId = ("COMBINE".equals(parsedEvt[0]) ? "MAP" : parsedEvt[0].substring(0, 3)) + parsedEvt[1];
-                taskPhase = ("COMBINE".equals(parsedEvt[0]) ? "C" : "") + parsedEvt[2];
-            }
-
-            if (!taskId.equals(prevTaskId))
-                tasks.put(taskId, new TreeMap<Integer,Long>());
-
-            Integer pos = phaseOrders.get(taskPhase);
-
-            assertNotNull("Invalid phase " + taskPhase, pos);
-
-            tasks.get(taskId).put(pos, evt.get2());
-
-            prevTaskId = taskId;
-
-            apiEvtCnt++;
-        }
-
-        for (Map.Entry<String ,SortedMap<Integer,Long>> task : tasks.entrySet()) {
-            Map<Integer, Long> order = task.getValue();
-
-            long prev = 0;
-
-            for (Map.Entry<Integer, Long> phase : order.entrySet()) {
-                assertTrue("Phase order of " + task.getKey() + " is invalid", phase.getValue() >= prev);
-
-                prev = phase.getValue();
-            }
-        }
-
-        final IgfsPath statPath = new IgfsPath("/xxx/" + USER + "/zzz/" + jobId + "/performance");
-
-        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
-            @Override public boolean apply() {
-                return igfs.exists(statPath);
-            }
-        }, 20_000);
-
-        final long apiEvtCnt0 = apiEvtCnt;
-
-        boolean res = GridTestUtils.waitForCondition(new GridAbsPredicate() {
-            @Override public boolean apply() {
-                try {
-                    try (BufferedReader reader = new BufferedReader(new InputStreamReader(igfs.open(statPath)))) {
-                        return apiEvtCnt0 == HadoopTestUtils.simpleCheckJobStatFile(reader);
-                    }
-                }
-                catch (IOException e) {
-                    throw new RuntimeException(e);
-                }
-            }
-        }, 10000);
-
-        if (!res) {
-            BufferedReader reader = new BufferedReader(new InputStreamReader(igfs.open(statPath)));
-
-            assert false : "Invalid API events count [exp=" + apiEvtCnt0 +
-                ", actual=" + HadoopTestUtils.simpleCheckJobStatFile(reader) + ']';
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTest() throws Exception {
-        igniteSecondary = startGridWithIgfs("grid-secondary", SECONDARY_IGFS_NAME, PRIMARY, null, SECONDARY_REST_CFG);
-
-        super.beforeTest();
-    }
-
-    /**
-     * Start grid with IGFS.
-     *
-     * @param gridName Grid name.
-     * @param igfsName IGFS name
-     * @param mode IGFS mode.
-     * @param secondaryFs Secondary file system (optional).
-     * @param restCfg Rest configuration string (optional).
-     * @return Started grid instance.
-     * @throws Exception If failed.
-     */
-    protected Ignite startGridWithIgfs(String gridName, String igfsName, IgfsMode mode,
-        @Nullable IgfsSecondaryFileSystem secondaryFs, @Nullable IgfsIpcEndpointConfiguration restCfg) throws Exception {
-        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
-
-        igfsCfg.setDataCacheName("dataCache");
-        igfsCfg.setMetaCacheName("metaCache");
-        igfsCfg.setName(igfsName);
-        igfsCfg.setBlockSize(IGFS_BLOCK_SIZE);
-        igfsCfg.setDefaultMode(mode);
-        igfsCfg.setIpcEndpointConfiguration(restCfg);
-        igfsCfg.setSecondaryFileSystem(secondaryFs);
-        igfsCfg.setPrefetchBlocks(PREFETCH_BLOCKS);
-        igfsCfg.setSequentialReadsBeforePrefetch(SEQ_READS_BEFORE_PREFETCH);
-
-        CacheConfiguration dataCacheCfg = defaultCacheConfiguration();
-
-        dataCacheCfg.setName("dataCache");
-        dataCacheCfg.setCacheMode(PARTITIONED);
-        dataCacheCfg.setNearConfiguration(null);
-        dataCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        dataCacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(2));
-        dataCacheCfg.setBackups(0);
-        dataCacheCfg.setAtomicityMode(TRANSACTIONAL);
-        dataCacheCfg.setOffHeapMaxMemory(0);
-
-        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
-
-        metaCacheCfg.setName("metaCache");
-        metaCacheCfg.setCacheMode(REPLICATED);
-        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        IgniteConfiguration cfg = new IgniteConfiguration();
-
-        cfg.setGridName(gridName);
-
-        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
-
-        discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
-
-        cfg.setDiscoverySpi(discoSpi);
-        cfg.setCacheConfiguration(dataCacheCfg, metaCacheCfg);
-        cfg.setFileSystemConfiguration(igfsCfg);
-
-        cfg.setLocalHost("127.0.0.1");
-        cfg.setConnectorConfiguration(null);
-
-        HadoopConfiguration hadoopCfg = createHadoopConfiguration();
-
-        if (hadoopCfg != null)
-            cfg.setHadoopConfiguration(hadoopCfg);
-
-        return G.start(cfg);
-    }
-
-    /**
-     * Creates custom Hadoop configuration.
-     *
-     * @return The Hadoop configuration.
-     */
-    protected HadoopConfiguration createHadoopConfiguration() {
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public FileSystemConfiguration igfsConfiguration() throws Exception {
-        FileSystemConfiguration fsCfg = super.igfsConfiguration();
-
-        secondaryFs = new IgniteHadoopIgfsSecondaryFileSystem(SECONDARY_URI, SECONDARY_CFG);
-
-        fsCfg.setSecondaryFileSystem(secondaryFs);
-
-        return fsCfg;
-    }
-}
\ No newline at end of file
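
For orientation, the essential submission path inside doTest() above boils down to a few
calls; a condensed sketch with imports as in the file above (the input/output paths here
are illustrative):

    JobConf jobConf = new JobConf();

    setupFileSystems(jobConf); // Point fs.defaultFS and the IGFS implementations at the grid.

    Job job = Job.getInstance(jobConf);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    FileInputFormat.setInputPaths(job, new Path(igfsScheme() + "/input/words"));
    FileOutputFormat.setOutputPath(job, new Path(igfsScheme() + "/output"));

    // Submit to the embedded job tracker and block until all phases complete.
    HadoopJobId jobId = new HadoopJobId(UUID.randomUUID(), 1);

    grid(0).hadoop().submit(jobId, createJobInfo(job.getConfiguration())).get();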

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractSelfTest.java
deleted file mode 100644
index fb16988..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractSelfTest.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import java.io.File;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.configuration.ConnectorConfiguration;
-import org.apache.ignite.configuration.FileSystemConfiguration;
-import org.apache.ignite.configuration.HadoopConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem;
-import org.apache.ignite.igfs.IgfsGroupDataBlocksKeyMapper;
-import org.apache.ignite.igfs.IgfsIpcEndpointConfiguration;
-import org.apache.ignite.igfs.IgfsIpcEndpointType;
-import org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemsUtils;
-import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
-import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
-
-import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
-import static org.apache.ignite.cache.CacheMode.PARTITIONED;
-import static org.apache.ignite.cache.CacheMode.REPLICATED;
-import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
-
-/**
- * Abstract class for Hadoop tests.
- */
-public abstract class HadoopAbstractSelfTest extends GridCommonAbstractTest {
-    /** */
-    private static TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
-
-    /** REST port. */
-    protected static final int REST_PORT = 11212;
-
-    /** IGFS name. */
-    protected static final String igfsName = null;
-
-    /** IGFS name. */
-    protected static final String igfsMetaCacheName = "meta";
-
-    /** IGFS name. */
-    protected static final String igfsDataCacheName = "data";
-
-    /** IGFS block size. */
-    protected static final int igfsBlockSize = 1024;
-
-    /** IGFS block group size. */
-    protected static final int igfsBlockGroupSize = 8;
-
-    /** Initial REST port. */
-    private int restPort = REST_PORT;
-
-    /** Secondary file system REST endpoint configuration. */
-    protected static final IgfsIpcEndpointConfiguration SECONDARY_REST_CFG;
-
-    static {
-        SECONDARY_REST_CFG = new IgfsIpcEndpointConfiguration();
-
-        SECONDARY_REST_CFG.setType(IgfsIpcEndpointType.TCP);
-        SECONDARY_REST_CFG.setPort(11500);
-    }
-
-
-    /** Initial classpath. */
-    private static String initCp;
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTestsStarted() throws Exception {
-        // Add surefire classpath to regular classpath.
-        initCp = System.getProperty("java.class.path");
-
-        String surefireCp = System.getProperty("surefire.test.class.path");
-
-        if (surefireCp != null)
-            System.setProperty("java.class.path", initCp + File.pathSeparatorChar + surefireCp);
-
-        super.beforeTestsStarted();
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTestsStopped() throws Exception {
-        super.afterTestsStopped();
-
-        // Restore classpath.
-        System.setProperty("java.class.path", initCp);
-
-        initCp = null;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
-        IgniteConfiguration cfg = super.getConfiguration(gridName);
-
-        cfg.setHadoopConfiguration(hadoopConfiguration(gridName));
-
-        TcpCommunicationSpi commSpi = new TcpCommunicationSpi();
-
-        commSpi.setSharedMemoryPort(-1);
-
-        cfg.setCommunicationSpi(commSpi);
-
-        TcpDiscoverySpi discoSpi = (TcpDiscoverySpi)cfg.getDiscoverySpi();
-
-        discoSpi.setIpFinder(IP_FINDER);
-
-        if (igfsEnabled()) {
-            cfg.setCacheConfiguration(metaCacheConfiguration(), dataCacheConfiguration());
-
-            cfg.setFileSystemConfiguration(igfsConfiguration());
-        }
-
-        if (restEnabled()) {
-            ConnectorConfiguration clnCfg = new ConnectorConfiguration();
-
-            clnCfg.setPort(restPort++);
-
-            cfg.setConnectorConfiguration(clnCfg);
-        }
-
-        cfg.setLocalHost("127.0.0.1");
-        cfg.setPeerClassLoadingEnabled(false);
-
-        return cfg;
-    }
-
-    /**
-     * @param gridName Grid name.
-     * @return Hadoop configuration.
-     */
-    public HadoopConfiguration hadoopConfiguration(String gridName) {
-        HadoopConfiguration cfg = new HadoopConfiguration();
-
-        cfg.setMaxParallelTasks(3);
-
-        return cfg;
-    }
-
-    /**
-     * @return IGFS configuration.
-     */
-    public FileSystemConfiguration igfsConfiguration() throws Exception {
-        FileSystemConfiguration cfg = new FileSystemConfiguration();
-
-        cfg.setName(igfsName);
-        cfg.setBlockSize(igfsBlockSize);
-        cfg.setDataCacheName(igfsDataCacheName);
-        cfg.setMetaCacheName(igfsMetaCacheName);
-        cfg.setFragmentizerEnabled(false);
-
-        return cfg;
-    }
-
-    /**
-     * @return IGFS meta cache configuration.
-     */
-    public CacheConfiguration metaCacheConfiguration() {
-        CacheConfiguration cfg = new CacheConfiguration();
-
-        cfg.setName(igfsMetaCacheName);
-        cfg.setCacheMode(REPLICATED);
-        cfg.setAtomicityMode(TRANSACTIONAL);
-        cfg.setWriteSynchronizationMode(FULL_SYNC);
-
-        return cfg;
-    }
-
-    /**
-     * @return IGFS data cache configuration.
-     */
-    private CacheConfiguration dataCacheConfiguration() {
-        CacheConfiguration cfg = new CacheConfiguration();
-
-        cfg.setName(igfsDataCacheName);
-        cfg.setCacheMode(PARTITIONED);
-        cfg.setAtomicityMode(TRANSACTIONAL);
-        cfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(igfsBlockGroupSize));
-        cfg.setWriteSynchronizationMode(FULL_SYNC);
-
-        return cfg;
-    }
-
-    /**
-     * @return {@code True} if IGFS is enabled on Hadoop nodes.
-     */
-    protected boolean igfsEnabled() {
-        return false;
-    }
-
-    /**
-     * @return {@code True} if REST is enabled on Hadoop nodes.
-     */
-    protected boolean restEnabled() {
-        return false;
-    }
-
-    /**
-     * @return Number of nodes to start.
-     */
-    protected int gridCount() {
-        return 3;
-    }
-
-    /**
-     * @param cfg Config.
-     */
-    protected void setupFileSystems(Configuration cfg) {
-        cfg.set("fs.defaultFS", igfsScheme());
-        cfg.set("fs.igfs.impl", org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.class.getName());
-        cfg.set("fs.AbstractFileSystem.igfs.impl", IgniteHadoopFileSystem.
-            class.getName());
-
-        HadoopFileSystemsUtils.setupFileSystems(cfg);
-    }
-
-    /**
-     * @return IGFS scheme for test.
-     */
-    protected String igfsScheme() {
-        return "igfs://:" + getTestGridName(0) + "@/";
-    }
-}
\ No newline at end of file
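
Concrete tests opt in to the optional pieces of this base class by overriding the flags;
a minimal sketch of such a subclass (the class name is hypothetical):

    /** Hypothetical concrete test running with IGFS and the REST connector enabled. */
    public class HadoopIgfsRestEnabledSelfTest extends HadoopAbstractSelfTest {
        /** {@inheritDoc} */
        @Override protected boolean igfsEnabled() {
            return true; // Adds the meta/data caches and FileSystemConfiguration to each node.
        }

        /** {@inheritDoc} */
        @Override protected boolean restEnabled() {
            return true; // Gives each node a ConnectorConfiguration with its own port.
        }
    }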

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractWordCountTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractWordCountTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractWordCountTest.java
deleted file mode 100644
index e45c127..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractWordCountTest.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import com.google.common.base.Joiner;
-import java.io.BufferedReader;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.PrintWriter;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.internal.processors.igfs.IgfsEx;
-
-/**
- * Abstract class for tests based on WordCount test job.
- */
-public abstract class HadoopAbstractWordCountTest extends HadoopAbstractSelfTest {
-    /** Input path. */
-    protected static final String PATH_INPUT = "/input";
-
-    /** Output path. */
-    protected static final String PATH_OUTPUT = "/output";
-
-    /** IGFS instance. */
-    protected IgfsEx igfs;
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTestsStarted() throws Exception {
-        super.beforeTestsStarted();
-
-        Configuration cfg = new Configuration();
-
-        setupFileSystems(cfg);
-
-        // Init cache by correct LocalFileSystem implementation
-        FileSystem.getLocal(cfg);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTest() throws Exception {
-        igfs = (IgfsEx)startGrids(gridCount()).fileSystem(igfsName);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTest() throws Exception {
-        stopAllGrids(true);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected boolean igfsEnabled() {
-        return true;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected int gridCount() {
-        return 1;
-    }
-
-    /**
-     * Generates test file.
-     *
-     * @param path File name.
-     * @param wordCounts Words and counts.
-     * @throws Exception If failed.
-     */
-    protected void generateTestFile(String path, Object... wordCounts) throws Exception {
-        List<String> wordsArr = new ArrayList<>();
-
-        //Generating
-        for (int i = 0; i < wordCounts.length; i += 2) {
-            String word = (String) wordCounts[i];
-            int cnt = (Integer) wordCounts[i + 1];
-
-            while (cnt-- > 0)
-                wordsArr.add(word);
-        }
-
-        //Shuffling
-        for (int i = 0; i < wordsArr.size(); i++) {
-            int j = (int)(Math.random() * wordsArr.size());
-
-            Collections.swap(wordsArr, i, j);
-        }
-
-        //Input file preparing
-        PrintWriter testInputFileWriter = new PrintWriter(igfs.create(new IgfsPath(path), true));
-
-        int j = 0;
-
-        while (j < wordsArr.size()) {
-            int i = 5 + (int)(Math.random() * 5);
-
-            List<String> subList = wordsArr.subList(j, Math.min(j + i, wordsArr.size()));
-            j += i;
-
-            testInputFileWriter.println(Joiner.on(' ').join(subList));
-        }
-
-        testInputFileWriter.close();
-    }
-
-    /**
-     * Read w/o decoding (default).
-     *
-     * @param fileName The file.
-     * @return The file contents, human-readable.
-     * @throws Exception On error.
-     */
-    protected String readAndSortFile(String fileName) throws Exception {
-        return readAndSortFile(fileName, null);
-    }
-
-    /**
-     * Reads whole text file into String.
-     *
-     * @param fileName Name of the file to read.
-     * @return Content of the file as String value.
-     * @throws Exception If could not read the file.
-     */
-    protected String readAndSortFile(String fileName, Configuration conf) throws Exception {
-        final List<String> list = new ArrayList<>();
-
-        final boolean snappyDecode = conf != null && conf.getBoolean(FileOutputFormat.COMPRESS, false);
-
-        if (snappyDecode) {
-            try (SequenceFile.Reader reader = new SequenceFile.Reader(conf,
-                    SequenceFile.Reader.file(new Path(fileName)))) {
-                Text key = new Text();
-
-                IntWritable val = new IntWritable();
-
-                while (reader.next(key, val))
-                    list.add(key + "\t" + val);
-            }
-        }
-        else {
-            try (InputStream is0 = igfs.open(new IgfsPath(fileName))) {
-                BufferedReader reader = new BufferedReader(new InputStreamReader(is0));
-
-                String line;
-
-                while ((line = reader.readLine()) != null)
-                    list.add(line);
-            }
-        }
-
-        Collections.sort(list);
-
-        return Joiner.on('\n').join(list) + "\n";
-    }
-}
\ No newline at end of file
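
A small design note on generateTestFile() above: the hand-rolled index-swap loop is a
naive shuffle (slightly biased, unlike Fisher-Yates); the standard library equivalent is
a one-liner, as in this sketch:

    // Uniformly permutes the list in place (unbiased Fisher-Yates shuffle).
    java.util.Collections.shuffle(wordsArr);

For this test the bias is irrelevant, since only the per-word counts are verified, but
shuffle() is the clearer idiom.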

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoaderTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoaderTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoaderTest.java
deleted file mode 100644
index 2fd7777..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoaderTest.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import javax.security.auth.AuthPermission;
-import junit.framework.TestCase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.ignite.internal.processors.hadoop.deps.CircularWIthHadoop;
-import org.apache.ignite.internal.processors.hadoop.deps.CircularWithoutHadoop;
-import org.apache.ignite.internal.processors.hadoop.deps.WithIndirectField;
-import org.apache.ignite.internal.processors.hadoop.deps.WithCast;
-import org.apache.ignite.internal.processors.hadoop.deps.WithClassAnnotation;
-import org.apache.ignite.internal.processors.hadoop.deps.WithConstructorInvocation;
-import org.apache.ignite.internal.processors.hadoop.deps.WithMethodCheckedException;
-import org.apache.ignite.internal.processors.hadoop.deps.WithMethodRuntimeException;
-import org.apache.ignite.internal.processors.hadoop.deps.WithExtends;
-import org.apache.ignite.internal.processors.hadoop.deps.WithField;
-import org.apache.ignite.internal.processors.hadoop.deps.WithImplements;
-import org.apache.ignite.internal.processors.hadoop.deps.WithInitializer;
-import org.apache.ignite.internal.processors.hadoop.deps.WithInnerClass;
-import org.apache.ignite.internal.processors.hadoop.deps.WithLocalVariable;
-import org.apache.ignite.internal.processors.hadoop.deps.WithMethodAnnotation;
-import org.apache.ignite.internal.processors.hadoop.deps.WithMethodInvocation;
-import org.apache.ignite.internal.processors.hadoop.deps.WithMethodArgument;
-import org.apache.ignite.internal.processors.hadoop.deps.WithMethodReturnType;
-import org.apache.ignite.internal.processors.hadoop.deps.WithOuterClass;
-import org.apache.ignite.internal.processors.hadoop.deps.WithParameterAnnotation;
-import org.apache.ignite.internal.processors.hadoop.deps.WithStaticField;
-import org.apache.ignite.internal.processors.hadoop.deps.WithStaticInitializer;
-import org.apache.ignite.internal.processors.hadoop.deps.Without;
-
-/**
- * Tests for Hadoop classloader.
- */
-public class HadoopClassLoaderTest extends TestCase {
-    /** */
-    final HadoopClassLoader ldr = new HadoopClassLoader(null, "test", null);
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testClassLoading() throws Exception {
-        assertNotSame(CircularWIthHadoop.class, ldr.loadClass(CircularWIthHadoop.class.getName()));
-        assertNotSame(CircularWithoutHadoop.class, ldr.loadClass(CircularWithoutHadoop.class.getName()));
-
-        assertSame(Without.class, ldr.loadClass(Without.class.getName()));
-    }
-
-    /**
-     * Test dependency search.
-     */
-    public void testDependencySearch() {
-        // Positive cases:
-        final Class[] positiveClasses = {
-            Configuration.class,
-            HadoopUtils.class,
-            WithStaticField.class,
-            WithCast.class,
-            WithClassAnnotation.class,
-            WithConstructorInvocation.class,
-            WithMethodCheckedException.class,
-            WithMethodRuntimeException.class,
-            WithExtends.class,
-            WithField.class,
-            WithImplements.class,
-            WithInitializer.class,
-            WithInnerClass.class,
-            WithOuterClass.InnerNoHadoop.class,
-            WithLocalVariable.class,
-            WithMethodAnnotation.class,
-            WithMethodInvocation.class,
-            WithMethodArgument.class,
-            WithMethodReturnType.class,
-            WithParameterAnnotation.class,
-            WithStaticField.class,
-            WithStaticInitializer.class,
-            WithIndirectField.class,
-            CircularWIthHadoop.class,
-            CircularWithoutHadoop.class,
-        };
-
-        for (Class c : positiveClasses)
-            assertTrue(c.getName(), ldr.hasExternalDependencies(c.getName()));
-
-        // Negative cases:
-        final Class[] negativeClasses = {
-            Object.class,
-            AuthPermission.class,
-            Without.class,
-        };
-
-        for (Class c : negativeClasses)
-            assertFalse(c.getName(), ldr.hasExternalDependencies(c.getName()));
-    }
-}
\ No newline at end of file
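
For context, the loader under test re-loads Hadoop-dependent classes in isolation while
delegating Hadoop-free ones to the parent; a minimal usage sketch, built from the calls
exercised above and meant to run inside a test method:

    HadoopClassLoader ldr = new HadoopClassLoader(null, "test", null);

    // A class that (transitively) references Hadoop types is re-defined by this loader...
    Class<?> isolated = ldr.loadClass(WithField.class.getName());
    assert isolated != WithField.class;

    // ...while a Hadoop-free class is delegated to the parent loader.
    assert ldr.loadClass(Without.class.getName()) == Without.class;

    // The bytecode dependency scan driving that decision is exposed directly.
    assert ldr.hasExternalDependencies(WithField.class.getName());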

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopCommandLineTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopCommandLineTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopCommandLineTest.java
deleted file mode 100644
index 7ee318a..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopCommandLineTest.java
+++ /dev/null
@@ -1,474 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop;
-
-import com.google.common.base.Joiner;
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileFilter;
-import java.io.FileNotFoundException;
-import java.io.FileReader;
-import java.io.InputStreamReader;
-import java.io.PrintWriter;
-import java.nio.file.Files;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import org.apache.ignite.IgniteSystemProperties;
-import org.apache.ignite.Ignition;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.hadoop.fs.IgniteHadoopFileSystemCounterWriter;
-import org.apache.ignite.igfs.IgfsInputStream;
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.internal.IgnitionEx;
-import org.apache.ignite.internal.processors.hadoop.jobtracker.HadoopJobTracker;
-import org.apache.ignite.internal.processors.igfs.IgfsEx;
-import org.apache.ignite.internal.processors.resource.GridSpringResourceContext;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lang.IgniteBiTuple;
-import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
-import org.jsr166.ConcurrentHashMap8;
-
-/**
- * Test of integration with Hadoop client via command line interface.
- */
-public class HadoopCommandLineTest extends GridCommonAbstractTest {
-    /** IGFS instance. */
-    private IgfsEx igfs;
-
-    /** */
-    private static final String igfsName = "igfs";
-
-    /** */
-    private static File testWorkDir;
-
-    /** */
-    private static String hadoopHome;
-
-    /** */
-    private static String hiveHome;
-
-    /** */
-    private static File examplesJar;
-
-    /**
-     *
-     * @param path File name.
-     * @param wordCounts Words and counts.
-     * @throws Exception If failed.
-     */
-    private void generateTestFile(File path, Object... wordCounts) throws Exception {
-        List<String> wordsArr = new ArrayList<>();
-
-        //Generating
-        for (int i = 0; i < wordCounts.length; i += 2) {
-            String word = (String) wordCounts[i];
-            int cnt = (Integer) wordCounts[i + 1];
-
-            while (cnt-- > 0)
-                wordsArr.add(word);
-        }
-
-        //Shuffling
-        for (int i = 0; i < wordsArr.size(); i++) {
-            int j = (int)(Math.random() * wordsArr.size());
-
-            Collections.swap(wordsArr, i, j);
-        }
-
-        //Writing file
-        try (PrintWriter writer = new PrintWriter(path)) {
-            int j = 0;
-
-            while (j < wordsArr.size()) {
-                int i = 5 + (int)(Math.random() * 5);
-
-                List<String> subList = wordsArr.subList(j, Math.min(j + i, wordsArr.size()));
-                j += i;
-
-                writer.println(Joiner.on(' ').join(subList));
-            }
-
-            writer.flush();
-        }
-    }
-
-    /**
-     * Generates two data files to join its with Hive.
-     *
-     * @throws FileNotFoundException If failed.
-     */
-    private void generateHiveTestFiles() throws FileNotFoundException {
-        try (PrintWriter writerA = new PrintWriter(new File(testWorkDir, "data-a"));
-             PrintWriter writerB = new PrintWriter(new File(testWorkDir, "data-b"))) {
-            char sep = '\t';
-
-            int idB = 0;
-            int idA = 0;
-            int v = 1000;
-
-            for (int i = 0; i < 1000; i++) {
-                writerA.print(idA++);
-                writerA.print(sep);
-                writerA.println(idB);
-
-                writerB.print(idB++);
-                writerB.print(sep);
-                writerB.println(v += 2);
-
-                writerB.print(idB++);
-                writerB.print(sep);
-                writerB.println(v += 2);
-            }
-
-            writerA.flush();
-            writerB.flush();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTestsStarted() throws Exception {
-        super.beforeTestsStarted();
-
-        hiveHome = IgniteSystemProperties.getString("HIVE_HOME");
-
-        assertFalse("HIVE_HOME hasn't been set.", F.isEmpty(hiveHome));
-
-        hadoopHome = IgniteSystemProperties.getString("HADOOP_HOME");
-
-        assertFalse("HADOOP_HOME hasn't been set.", F.isEmpty(hadoopHome));
-
-        String mapredHome = hadoopHome + "/share/hadoop/mapreduce";
-
-        File[] fileList = new File(mapredHome).listFiles(new FileFilter() {
-            @Override public boolean accept(File pathname) {
-                return pathname.getName().startsWith("hadoop-mapreduce-examples-") &&
-                    pathname.getName().endsWith(".jar");
-            }
-        });
-
-        assertEquals("Invalid hadoop distribution.", 1, fileList.length);
-
-        examplesJar = fileList[0];
-
-        testWorkDir = Files.createTempDirectory("hadoop-cli-test").toFile();
-
-        U.copy(resolveHadoopConfig("core-site.ignite.xml"), new File(testWorkDir, "core-site.xml"), false);
-
-        File srcFile = resolveHadoopConfig("mapred-site.ignite.xml");
-        File dstFile = new File(testWorkDir, "mapred-site.xml");
-
-        try (BufferedReader in = new BufferedReader(new FileReader(srcFile));
-             PrintWriter out = new PrintWriter(dstFile)) {
-            String line;
-
-            while ((line = in.readLine()) != null) {
-                if (line.startsWith("</configuration>"))
-                    out.println(
-                        "    <property>\n" +
-                        "        <name>" + HadoopUtils.JOB_COUNTER_WRITER_PROPERTY + "</name>\n" +
-                        "        <value>" + IgniteHadoopFileSystemCounterWriter.class.getName() + "</value>\n" +
-                        "    </property>\n");
-
-                out.println(line);
-            }
-
-            out.flush();
-        }
-
-        generateTestFile(new File(testWorkDir, "test-data"), "red", 100, "green", 200, "blue", 150, "yellow", 50);
-
-        generateHiveTestFiles();
-    }
-
-    /**
-     * Resolves a Hadoop configuration file.
-     *
-     * @param name File name.
-     * @return Resolved file.
-     */
-    private static File resolveHadoopConfig(String name) {
-        File path = U.resolveIgnitePath("modules/hadoop/config/" + name);
-
-        return path != null ? path : U.resolveIgnitePath("config/hadoop/" + name);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTestsStopped() throws Exception {
-        super.afterTestsStopped();
-
-        U.delete(testWorkDir);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTest() throws Exception {
-        String cfgPath = "config/hadoop/default-config.xml";
-
-        IgniteBiTuple<IgniteConfiguration, GridSpringResourceContext> tup = IgnitionEx.loadConfiguration(cfgPath);
-
-        IgniteConfiguration cfg = tup.get1();
-
-        cfg.setLocalHost("127.0.0.1"); // Avoid connecting to other nodes.
-
-        igfs = (IgfsEx) Ignition.start(cfg).fileSystem(igfsName);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTest() throws Exception {
-        stopAllGrids(true);
-    }
-
-    /**
-     * Creates a process builder with the environment required to run the Hadoop CLI.
-     *
-     * @return Process builder.
-     */
-    private ProcessBuilder createProcessBuilder() {
-        String sep = ":";
-
-        String ggClsPath = HadoopJob.class.getProtectionDomain().getCodeSource().getLocation().getPath() + sep +
-            HadoopJobTracker.class.getProtectionDomain().getCodeSource().getLocation().getPath() + sep +
-            ConcurrentHashMap8.class.getProtectionDomain().getCodeSource().getLocation().getPath();
-
-        ProcessBuilder res = new ProcessBuilder();
-
-        res.environment().put("HADOOP_HOME", hadoopHome);
-        res.environment().put("HADOOP_CLASSPATH", ggClsPath);
-        res.environment().put("HADOOP_CONF_DIR", testWorkDir.getAbsolutePath());
-
-        res.redirectErrorStream(true);
-
-        return res;
-    }
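-    // HADOOP_CONF_DIR points at the temporary work directory, so the forked CLI
-    // picks up the core-site.xml and mapred-site.xml copied there in
-    // beforeTestsStarted(), and HADOOP_CLASSPATH makes the Ignite classes visible
-    // to the forked process.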
-
-    /**
-     * Waits for the process to exit and prints its output.
-     *
-     * @param proc Process.
-     * @return Exit code.
-     * @throws Exception If failed.
-     */
-    private int watchProcess(Process proc) throws Exception {
-        BufferedReader reader = new BufferedReader(new InputStreamReader(proc.getInputStream()));
-
-        String line;
-
-        while ((line = reader.readLine()) != null)
-            log().info(line);
-
-        return proc.waitFor();
-    }
-
-    /**
-     * Executes Hadoop command line tool.
-     *
-     * @param args Arguments for Hadoop command line tool.
-     * @return Process exit code.
-     * @throws Exception If failed.
-     */
-    private int executeHadoopCmd(String... args) throws Exception {
-        ProcessBuilder procBuilder = createProcessBuilder();
-
-        List<String> cmd = new ArrayList<>();
-
-        cmd.add(hadoopHome + "/bin/hadoop");
-        cmd.addAll(Arrays.asList(args));
-
-        procBuilder.command(cmd);
-
-        log().info("Execute: " + procBuilder.command());
-
-        return watchProcess(procBuilder.start());
-    }
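-    // Example: executeHadoopCmd("fs", "-ls", "/") runs "$HADOOP_HOME/bin/hadoop fs -ls /"
-    // with the environment above and returns the process exit code.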
-
-    /**
-     * Executes Hive query.
-     *
-     * @param qry Query.
-     * @return Process exit code.
-     * @throws Exception If failed.
-     */
-    private int executeHiveQuery(String qry) throws Exception {
-        ProcessBuilder procBuilder = createProcessBuilder();
-
-        List<String> cmd = new ArrayList<>();
-
-        cmd.add(hiveHome + "/bin/hive");
-
-        cmd.add("--hiveconf");
-        cmd.add("hive.rpc.query.plan=true");
-
-        cmd.add("--hiveconf");
-        cmd.add("javax.jdo.option.ConnectionURL=jdbc:derby:" + testWorkDir.getAbsolutePath() + "/metastore_db;" +
-            "databaseName=metastore_db;create=true");
-
-        cmd.add("-e");
-        cmd.add(qry);
-
-        procBuilder.command(cmd);
-
-        log().info("Execute: " + procBuilder.command());
-
-        return watchProcess(procBuilder.start());
-    }
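-    // The Derby URL places the metastore under the temporary work directory with
-    // "create=true", so the suite runs against a fresh embedded metastore and
-    // needs no external Hive metastore service.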
-
-    /**
-     * Tests Hadoop command line integration.
-     *
-     * @throws Exception If failed.
-     */
-    public void testHadoopCommandLine() throws Exception {
-        assertEquals(0, executeHadoopCmd("fs", "-ls", "/"));
-
-        assertEquals(0, executeHadoopCmd("fs", "-mkdir", "/input"));
-
-        assertEquals(0, executeHadoopCmd("fs", "-put", new File(testWorkDir, "test-data").getAbsolutePath(), "/input"));
-
-        assertTrue(igfs.exists(new IgfsPath("/input/test-data")));
-
-        assertEquals(0, executeHadoopCmd("jar", examplesJar.getAbsolutePath(), "wordcount", "/input", "/output"));
-
-        IgfsPath path = new IgfsPath("/user/" + System.getProperty("user.name") + "/");
-
-        assertTrue(igfs.exists(path));
-
-        IgfsPath jobStatPath = null;
-
-        for (IgfsPath jobPath : igfs.listPaths(path)) {
-            assertNull(jobStatPath);
-
-            jobStatPath = jobPath;
-        }
-
-        File locStatFile = new File(testWorkDir, "performance");
-
-        assertEquals(0, executeHadoopCmd("fs", "-get", jobStatPath.toString() + "/performance", locStatFile.toString()));
-
-        long evtCnt = HadoopTestUtils.simpleCheckJobStatFile(new BufferedReader(new FileReader(locStatFile)));
-
-        assertTrue(evtCnt >= 22); // Minimum number of events for a job with a combiner.
-
-        assertTrue(igfs.exists(new IgfsPath("/output")));
-
-        BufferedReader in = new BufferedReader(new InputStreamReader(igfs.open(new IgfsPath("/output/part-r-00000"))));
-
-        List<String> res = new ArrayList<>();
-
-        String line;
-
-        while ((line = in.readLine()) != null)
-            res.add(line);
-
-        Collections.sort(res);
-
-        assertEquals("[blue\t150, green\t200, red\t100, yellow\t50]", res.toString());
-    }
-
-    /**
-     * Runs a query and checks the result.
-     *
-     * @param expRes Expected result.
-     * @param qry Query.
-     * @throws Exception If failed.
-     */
-    private void checkQuery(String expRes, String qry) throws Exception {
-        assertEquals(0, executeHiveQuery("drop table if exists result"));
-
-        assertEquals(0, executeHiveQuery(
-            "create table result " +
-            "row format delimited fields terminated by ' ' " +
-            "stored as textfile " +
-            "location '/result' as " + qry
-        ));
-
-        IgfsInputStream in = igfs.open(new IgfsPath("/result/000000_0"));
-
-        byte[] buf = new byte[(int) in.length()];
-
-        // Read the whole file; a single read() call may return fewer bytes than requested.
-        int off = 0;
-
-        while (off < buf.length) {
-            int read = in.read(buf, off, buf.length - off);
-
-            if (read < 0)
-                break;
-
-            off += read;
-        }
-
-        assertEquals(expRes, new String(buf));
-    }
-
-    /**
-     * Tests Hive integration.
-     *
-     * @throws Exception If failed.
-     */
-    public void testHiveCommandLine() throws Exception {
-        assertEquals(0, executeHiveQuery(
-            "create table table_a (" +
-                "id_a int," +
-                "id_b int" +
-            ") " +
-            "row format delimited fields terminated by '\\t'" +
-            "stored as textfile " +
-            "location '/table-a'"
-        ));
-
-        assertEquals(0, executeHadoopCmd("fs", "-put", new File(testWorkDir, "data-a").getAbsolutePath(), "/table-a"));
-
-        assertEquals(0, executeHiveQuery(
-            "create table table_b (" +
-                "id_b int," +
-                "rndv int" +
-            ") " +
-            "row format delimited fields terminated by '\\t'" +
-            "stored as textfile " +
-            "location '/table-b'"
-        ));
-
-        assertEquals(0, executeHadoopCmd("fs", "-put", new File(testWorkDir, "data-b").getAbsolutePath(), "/table-b"));
-
-        checkQuery(
-            "0 0\n" +
-            "1 2\n" +
-            "2 4\n" +
-            "3 6\n" +
-            "4 8\n" +
-            "5 10\n" +
-            "6 12\n" +
-            "7 14\n" +
-            "8 16\n" +
-            "9 18\n",
-            "select * from table_a order by id_a limit 10"
-        );
-
-        checkQuery("2000\n", "select count(id_b) from table_b");
-
-        checkQuery(
-            "250 500 2002\n" +
-            "251 502 2006\n" +
-            "252 504 2010\n" +
-            "253 506 2014\n" +
-            "254 508 2018\n" +
-            "255 510 2022\n" +
-            "256 512 2026\n" +
-            "257 514 2030\n" +
-            "258 516 2034\n" +
-            "259 518 2038\n",
-            "select a.id_a, a.id_b, b.rndv" +
-            " from table_a a" +
-            " inner join table_b b on a.id_b = b.id_b" +
-            " where b.rndv > 2000" +
-            " order by a.id_a limit 10"
-        );
-
-        checkQuery("1000\n", "select count(b.id_b) from table_a a inner join table_b b on a.id_b = b.id_b");
-    }
-}
\ No newline at end of file


[25/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/books/sherlock-holmes.txt
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/books/sherlock-holmes.txt b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/books/sherlock-holmes.txt
new file mode 100644
index 0000000..af52c04
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/internal/processors/hadoop/books/sherlock-holmes.txt
@@ -0,0 +1,13052 @@
+Project Gutenberg's The Adventures of Sherlock Holmes, by Arthur Conan Doyle
+
+This eBook is for the use of anyone anywhere at no cost and with
+almost no restrictions whatsoever.  You may copy it, give it away or
+re-use it under the terms of the Project Gutenberg License included
+with this eBook or online at www.gutenberg.net
+
+
+Title: The Adventures of Sherlock Holmes
+
+Author: Arthur Conan Doyle
+
+Posting Date: April 18, 2011 [EBook #1661]
+First Posted: November 29, 2002
+
+Language: English
+
+
+*** START OF THIS PROJECT GUTENBERG EBOOK THE ADVENTURES OF SHERLOCK HOLMES ***
+
+
+
+
+Produced by an anonymous Project Gutenberg volunteer and Jose Menendez
+
+
+
+
+
+
+
+
+
+THE ADVENTURES OF SHERLOCK HOLMES
+
+by
+
+SIR ARTHUR CONAN DOYLE
+
+
+
+   I. A Scandal in Bohemia
+  II. The Red-headed League
+ III. A Case of Identity
+  IV. The Boscombe Valley Mystery
+   V. The Five Orange Pips
+  VI. The Man with the Twisted Lip
+ VII. The Adventure of the Blue Carbuncle
+VIII. The Adventure of the Speckled Band
+  IX. The Adventure of the Engineer's Thumb
+   X. The Adventure of the Noble Bachelor
+  XI. The Adventure of the Beryl Coronet
+ XII. The Adventure of the Copper Beeches
+
+
+
+
+ADVENTURE I. A SCANDAL IN BOHEMIA
+
+I.
+
+To Sherlock Holmes she is always THE woman. I have seldom heard
+him mention her under any other name. In his eyes she eclipses
+and predominates the whole of her sex. It was not that he felt
+any emotion akin to love for Irene Adler. All emotions, and that
+one particularly, were abhorrent to his cold, precise but
+admirably balanced mind. He was, I take it, the most perfect
+reasoning and observing machine that the world has seen, but as a
+lover he would have placed himself in a false position. He never
+spoke of the softer passions, save with a gibe and a sneer. They
+were admirable things for the observer--excellent for drawing the
+veil from men's motives and actions. But for the trained reasoner
+to admit such intrusions into his own delicate and finely
+adjusted temperament was to introduce a distracting factor which
+might throw a doubt upon all his mental results. Grit in a
+sensitive instrument, or a crack in one of his own high-power
+lenses, would not be more disturbing than a strong emotion in a
+nature such as his. And yet there was but one woman to him, and
+that woman was the late Irene Adler, of dubious and questionable
+memory.
+
+I had seen little of Holmes lately. My marriage had drifted us
+away from each other. My own complete happiness, and the
+home-centred interests which rise up around the man who first
+finds himself master of his own establishment, were sufficient to
+absorb all my attention, while Holmes, who loathed every form of
+society with his whole Bohemian soul, remained in our lodgings in
+Baker Street, buried among his old books, and alternating from
+week to week between cocaine and ambition, the drowsiness of the
+drug, and the fierce energy of his own keen nature. He was still,
+as ever, deeply attracted by the study of crime, and occupied his
+immense faculties and extraordinary powers of observation in
+following out those clues, and clearing up those mysteries which
+had been abandoned as hopeless by the official police. From time
+to time I heard some vague account of his doings: of his summons
+to Odessa in the case of the Trepoff murder, of his clearing up
+of the singular tragedy of the Atkinson brothers at Trincomalee,
+and finally of the mission which he had accomplished so
+delicately and successfully for the reigning family of Holland.
+Beyond these signs of his activity, however, which I merely
+shared with all the readers of the daily press, I knew little of
+my former friend and companion.
+
+One night--it was on the twentieth of March, 1888--I was
+returning from a journey to a patient (for I had now returned to
+civil practice), when my way led me through Baker Street. As I
+passed the well-remembered door, which must always be associated
+in my mind with my wooing, and with the dark incidents of the
+Study in Scarlet, I was seized with a keen desire to see Holmes
+again, and to know how he was employing his extraordinary powers.
+His rooms were brilliantly lit, and, even as I looked up, I saw
+his tall, spare figure pass twice in a dark silhouette against
+the blind. He was pacing the room swiftly, eagerly, with his head
+sunk upon his chest and his hands clasped behind him. To me, who
+knew his every mood and habit, his attitude and manner told their
+own story. He was at work again. He had risen out of his
+drug-created dreams and was hot upon the scent of some new
+problem. I rang the bell and was shown up to the chamber which
+had formerly been in part my own.
+
+His manner was not effusive. It seldom was; but he was glad, I
+think, to see me. With hardly a word spoken, but with a kindly
+eye, he waved me to an armchair, threw across his case of cigars,
+and indicated a spirit case and a gasogene in the corner. Then he
+stood before the fire and looked me over in his singular
+introspective fashion.
+
+"Wedlock suits you," he remarked. "I think, Watson, that you have
+put on seven and a half pounds since I saw you."
+
+"Seven!" I answered.
+
+"Indeed, I should have thought a little more. Just a trifle more,
+I fancy, Watson. And in practice again, I observe. You did not
+tell me that you intended to go into harness."
+
+"Then, how do you know?"
+
+"I see it, I deduce it. How do I know that you have been getting
+yourself very wet lately, and that you have a most clumsy and
+careless servant girl?"
+
+"My dear Holmes," said I, "this is too much. You would certainly
+have been burned, had you lived a few centuries ago. It is true
+that I had a country walk on Thursday and came home in a dreadful
+mess, but as I have changed my clothes I can't imagine how you
+deduce it. As to Mary Jane, she is incorrigible, and my wife has
+given her notice, but there, again, I fail to see how you work it
+out."
+
+He chuckled to himself and rubbed his long, nervous hands
+together.
+
+"It is simplicity itself," said he; "my eyes tell me that on the
+inside of your left shoe, just where the firelight strikes it,
+the leather is scored by six almost parallel cuts. Obviously they
+have been caused by someone who has very carelessly scraped round
+the edges of the sole in order to remove crusted mud from it.
+Hence, you see, my double deduction that you had been out in vile
+weather, and that you had a particularly malignant boot-slitting
+specimen of the London slavey. As to your practice, if a
+gentleman walks into my rooms smelling of iodoform, with a black
+mark of nitrate of silver upon his right forefinger, and a bulge
+on the right side of his top-hat to show where he has secreted
+his stethoscope, I must be dull, indeed, if I do not pronounce
+him to be an active member of the medical profession."
+
+I could not help laughing at the ease with which he explained his
+process of deduction. "When I hear you give your reasons," I
+remarked, "the thing always appears to me to be so ridiculously
+simple that I could easily do it myself, though at each
+successive instance of your reasoning I am baffled until you
+explain your process. And yet I believe that my eyes are as good
+as yours."
+
+"Quite so," he answered, lighting a cigarette, and throwing
+himself down into an armchair. "You see, but you do not observe.
+The distinction is clear. For example, you have frequently seen
+the steps which lead up from the hall to this room."
+
+"Frequently."
+
+"How often?"
+
+"Well, some hundreds of times."
+
+"Then how many are there?"
+
+"How many? I don't know."
+
+"Quite so! You have not observed. And yet you have seen. That is
+just my point. Now, I know that there are seventeen steps,
+because I have both seen and observed. By-the-way, since you are
+interested in these little problems, and since you are good
+enough to chronicle one or two of my trifling experiences, you
+may be interested in this." He threw over a sheet of thick,
+pink-tinted note-paper which had been lying open upon the table.
+"It came by the last post," said he. "Read it aloud."
+
+The note was undated, and without either signature or address.
+
+"There will call upon you to-night, at a quarter to eight
+o'clock," it said, "a gentleman who desires to consult you upon a
+matter of the very deepest moment. Your recent services to one of
+the royal houses of Europe have shown that you are one who may
+safely be trusted with matters which are of an importance which
+can hardly be exaggerated. This account of you we have from all
+quarters received. Be in your chamber then at that hour, and do
+not take it amiss if your visitor wear a mask."
+
+"This is indeed a mystery," I remarked. "What do you imagine that
+it means?"
+
+"I have no data yet. It is a capital mistake to theorize before
+one has data. Insensibly one begins to twist facts to suit
+theories, instead of theories to suit facts. But the note itself.
+What do you deduce from it?"
+
+I carefully examined the writing, and the paper upon which it was
+written.
+
+"The man who wrote it was presumably well to do," I remarked,
+endeavouring to imitate my companion's processes. "Such paper
+could not be bought under half a crown a packet. It is peculiarly
+strong and stiff."
+
+"Peculiar--that is the very word," said Holmes. "It is not an
+English paper at all. Hold it up to the light."
+
+I did so, and saw a large "E" with a small "g," a "P," and a
+large "G" with a small "t" woven into the texture of the paper.
+
+"What do you make of that?" asked Holmes.
+
+"The name of the maker, no doubt; or his monogram, rather."
+
+"Not at all. The 'G' with the small 't' stands for
+'Gesellschaft,' which is the German for 'Company.' It is a
+customary contraction like our 'Co.' 'P,' of course, stands for
+'Papier.' Now for the 'Eg.' Let us glance at our Continental
+Gazetteer." He took down a heavy brown volume from his shelves.
+"Eglow, Eglonitz--here we are, Egria. It is in a German-speaking
+country--in Bohemia, not far from Carlsbad. 'Remarkable as being
+the scene of the death of Wallenstein, and for its numerous
+glass-factories and paper-mills.' Ha, ha, my boy, what do you
+make of that?" His eyes sparkled, and he sent up a great blue
+triumphant cloud from his cigarette.
+
+"The paper was made in Bohemia," I said.
+
+"Precisely. And the man who wrote the note is a German. Do you
+note the peculiar construction of the sentence--'This account of
+you we have from all quarters received.' A Frenchman or Russian
+could not have written that. It is the German who is so
+uncourteous to his verbs. It only remains, therefore, to discover
+what is wanted by this German who writes upon Bohemian paper and
+prefers wearing a mask to showing his face. And here he comes, if
+I am not mistaken, to resolve all our doubts."
+
+As he spoke there was the sharp sound of horses' hoofs and
+grating wheels against the curb, followed by a sharp pull at the
+bell. Holmes whistled.
+
+"A pair, by the sound," said he. "Yes," he continued, glancing
+out of the window. "A nice little brougham and a pair of
+beauties. A hundred and fifty guineas apiece. There's money in
+this case, Watson, if there is nothing else."
+
+"I think that I had better go, Holmes."
+
+"Not a bit, Doctor. Stay where you are. I am lost without my
+Boswell. And this promises to be interesting. It would be a pity
+to miss it."
+
+"But your client--"
+
+"Never mind him. I may want your help, and so may he. Here he
+comes. Sit down in that armchair, Doctor, and give us your best
+attention."
+
+A slow and heavy step, which had been heard upon the stairs and
+in the passage, paused immediately outside the door. Then there
+was a loud and authoritative tap.
+
+"Come in!" said Holmes.
+
+A man entered who could hardly have been less than six feet six
+inches in height, with the chest and limbs of a Hercules. His
+dress was rich with a richness which would, in England, be looked
+upon as akin to bad taste. Heavy bands of astrakhan were slashed
+across the sleeves and fronts of his double-breasted coat, while
+the deep blue cloak which was thrown over his shoulders was lined
+with flame-coloured silk and secured at the neck with a brooch
+which consisted of a single flaming beryl. Boots which extended
+halfway up his calves, and which were trimmed at the tops with
+rich brown fur, completed the impression of barbaric opulence
+which was suggested by his whole appearance. He carried a
+broad-brimmed hat in his hand, while he wore across the upper
+part of his face, extending down past the cheekbones, a black
+vizard mask, which he had apparently adjusted that very moment,
+for his hand was still raised to it as he entered. From the lower
+part of the face he appeared to be a man of strong character,
+with a thick, hanging lip, and a long, straight chin suggestive
+of resolution pushed to the length of obstinacy.
+
+"You had my note?" he asked with a deep harsh voice and a
+strongly marked German accent. "I told you that I would call." He
+looked from one to the other of us, as if uncertain which to
+address.
+
+"Pray take a seat," said Holmes. "This is my friend and
+colleague, Dr. Watson, who is occasionally good enough to help me
+in my cases. Whom have I the honour to address?"
+
+"You may address me as the Count Von Kramm, a Bohemian nobleman.
+I understand that this gentleman, your friend, is a man of honour
+and discretion, whom I may trust with a matter of the most
+extreme importance. If not, I should much prefer to communicate
+with you alone."
+
+I rose to go, but Holmes caught me by the wrist and pushed me
+back into my chair. "It is both, or none," said he. "You may say
+before this gentleman anything which you may say to me."
+
+The Count shrugged his broad shoulders. "Then I must begin," said
+he, "by binding you both to absolute secrecy for two years; at
+the end of that time the matter will be of no importance. At
+present it is not too much to say that it is of such weight it
+may have an influence upon European history."
+
+"I promise," said Holmes.
+
+"And I."
+
+"You will excuse this mask," continued our strange visitor. "The
+august person who employs me wishes his agent to be unknown to
+you, and I may confess at once that the title by which I have
+just called myself is not exactly my own."
+
+"I was aware of it," said Holmes dryly.
+
+"The circumstances are of great delicacy, and every precaution
+has to be taken to quench what might grow to be an immense
+scandal and seriously compromise one of the reigning families of
+Europe. To speak plainly, the matter implicates the great House
+of Ormstein, hereditary kings of Bohemia."
+
+"I was also aware of that," murmured Holmes, settling himself
+down in his armchair and closing his eyes.
+
+Our visitor glanced with some apparent surprise at the languid,
+lounging figure of the man who had been no doubt depicted to him
+as the most incisive reasoner and most energetic agent in Europe.
+Holmes slowly reopened his eyes and looked impatiently at his
+gigantic client.
+
+"If your Majesty would condescend to state your case," he
+remarked, "I should be better able to advise you."
+
+The man sprang from his chair and paced up and down the room in
+uncontrollable agitation. Then, with a gesture of desperation, he
+tore the mask from his face and hurled it upon the ground. "You
+are right," he cried; "I am the King. Why should I attempt to
+conceal it?"
+
+"Why, indeed?" murmured Holmes. "Your Majesty had not spoken
+before I was aware that I was addressing Wilhelm Gottsreich
+Sigismond von Ormstein, Grand Duke of Cassel-Felstein, and
+hereditary King of Bohemia."
+
+"But you can understand," said our strange visitor, sitting down
+once more and passing his hand over his high white forehead, "you
+can understand that I am not accustomed to doing such business in
+my own person. Yet the matter was so delicate that I could not
+confide it to an agent without putting myself in his power. I
+have come incognito from Prague for the purpose of consulting
+you."
+
+"Then, pray consult," said Holmes, shutting his eyes once more.
+
+"The facts are briefly these: Some five years ago, during a
+lengthy visit to Warsaw, I made the acquaintance of the well-known
+adventuress, Irene Adler. The name is no doubt familiar to you."
+
+"Kindly look her up in my index, Doctor," murmured Holmes without
+opening his eyes. For many years he had adopted a system of
+docketing all paragraphs concerning men and things, so that it
+was difficult to name a subject or a person on which he could not
+at once furnish information. In this case I found her biography
+sandwiched in between that of a Hebrew rabbi and that of a
+staff-commander who had written a monograph upon the deep-sea
+fishes.
+
+"Let me see!" said Holmes. "Hum! Born in New Jersey in the year
+1858. Contralto--hum! La Scala, hum! Prima donna Imperial Opera
+of Warsaw--yes! Retired from operatic stage--ha! Living in
+London--quite so! Your Majesty, as I understand, became entangled
+with this young person, wrote her some compromising letters, and
+is now desirous of getting those letters back."
+
+"Precisely so. But how--"
+
+"Was there a secret marriage?"
+
+"None."
+
+"No legal papers or certificates?"
+
+"None."
+
+"Then I fail to follow your Majesty. If this young person should
+produce her letters for blackmailing or other purposes, how is
+she to prove their authenticity?"
+
+"There is the writing."
+
+"Pooh, pooh! Forgery."
+
+"My private note-paper."
+
+"Stolen."
+
+"My own seal."
+
+"Imitated."
+
+"My photograph."
+
+"Bought."
+
+"We were both in the photograph."
+
+"Oh, dear! That is very bad! Your Majesty has indeed committed an
+indiscretion."
+
+"I was mad--insane."
+
+"You have compromised yourself seriously."
+
+"I was only Crown Prince then. I was young. I am but thirty now."
+
+"It must be recovered."
+
+"We have tried and failed."
+
+"Your Majesty must pay. It must be bought."
+
+"She will not sell."
+
+"Stolen, then."
+
+"Five attempts have been made. Twice burglars in my pay ransacked
+her house. Once we diverted her luggage when she travelled. Twice
+she has been waylaid. There has been no result."
+
+"No sign of it?"
+
+"Absolutely none."
+
+Holmes laughed. "It is quite a pretty little problem," said he.
+
+"But a very serious one to me," returned the King reproachfully.
+
+"Very, indeed. And what does she propose to do with the
+photograph?"
+
+"To ruin me."
+
+"But how?"
+
+"I am about to be married."
+
+"So I have heard."
+
+"To Clotilde Lothman von Saxe-Meningen, second daughter of the
+King of Scandinavia. You may know the strict principles of her
+family. She is herself the very soul of delicacy. A shadow of a
+doubt as to my conduct would bring the matter to an end."
+
+"And Irene Adler?"
+
+"Threatens to send them the photograph. And she will do it. I
+know that she will do it. You do not know her, but she has a soul
+of steel. She has the face of the most beautiful of women, and
+the mind of the most resolute of men. Rather than I should marry
+another woman, there are no lengths to which she would not
+go--none."
+
+"You are sure that she has not sent it yet?"
+
+"I am sure."
+
+"And why?"
+
+"Because she has said that she would send it on the day when the
+betrothal was publicly proclaimed. That will be next Monday."
+
+"Oh, then we have three days yet," said Holmes with a yawn. "That
+is very fortunate, as I have one or two matters of importance to
+look into just at present. Your Majesty will, of course, stay in
+London for the present?"
+
+"Certainly. You will find me at the Langham under the name of the
+Count Von Kramm."
+
+"Then I shall drop you a line to let you know how we progress."
+
+"Pray do so. I shall be all anxiety."
+
+"Then, as to money?"
+
+"You have carte blanche."
+
+"Absolutely?"
+
+"I tell you that I would give one of the provinces of my kingdom
+to have that photograph."
+
+"And for present expenses?"
+
+The King took a heavy chamois leather bag from under his cloak
+and laid it on the table.
+
+"There are three hundred pounds in gold and seven hundred in
+notes," he said.
+
+Holmes scribbled a receipt upon a sheet of his note-book and
+handed it to him.
+
+"And Mademoiselle's address?" he asked.
+
+"Is Briony Lodge, Serpentine Avenue, St. John's Wood."
+
+Holmes took a note of it. "One other question," said he. "Was the
+photograph a cabinet?"
+
+"It was."
+
+"Then, good-night, your Majesty, and I trust that we shall soon
+have some good news for you. And good-night, Watson," he added,
+as the wheels of the royal brougham rolled down the street. "If
+you will be good enough to call to-morrow afternoon at three
+o'clock I should like to chat this little matter over with you."
+
+
+II.
+
+At three o'clock precisely I was at Baker Street, but Holmes had
+not yet returned. The landlady informed me that he had left the
+house shortly after eight o'clock in the morning. I sat down
+beside the fire, however, with the intention of awaiting him,
+however long he might be. I was already deeply interested in his
+inquiry, for, though it was surrounded by none of the grim and
+strange features which were associated with the two crimes which
+I have already recorded, still, the nature of the case and the
+exalted station of his client gave it a character of its own.
+Indeed, apart from the nature of the investigation which my
+friend had on hand, there was something in his masterly grasp of
+a situation, and his keen, incisive reasoning, which made it a
+pleasure to me to study his system of work, and to follow the
+quick, subtle methods by which he disentangled the most
+inextricable mysteries. So accustomed was I to his invariable
+success that the very possibility of his failing had ceased to
+enter into my head.
+
+It was close upon four before the door opened, and a
+drunken-looking groom, ill-kempt and side-whiskered, with an
+inflamed face and disreputable clothes, walked into the room.
+Accustomed as I was to my friend's amazing powers in the use of
+disguises, I had to look three times before I was certain that it
+was indeed he. With a nod he vanished into the bedroom, whence he
+emerged in five minutes tweed-suited and respectable, as of old.
+Putting his hands into his pockets, he stretched out his legs in
+front of the fire and laughed heartily for some minutes.
+
+"Well, really!" he cried, and then he choked and laughed again
+until he was obliged to lie back, limp and helpless, in the
+chair.
+
+"What is it?"
+
+"It's quite too funny. I am sure you could never guess how I
+employed my morning, or what I ended by doing."
+
+"I can't imagine. I suppose that you have been watching the
+habits, and perhaps the house, of Miss Irene Adler."
+
+"Quite so; but the sequel was rather unusual. I will tell you,
+however. I left the house a little after eight o'clock this
+morning in the character of a groom out of work. There is a
+wonderful sympathy and freemasonry among horsey men. Be one of
+them, and you will know all that there is to know. I soon found
+Briony Lodge. It is a bijou villa, with a garden at the back, but
+built out in front right up to the road, two stories. Chubb lock
+to the door. Large sitting-room on the right side, well
+furnished, with long windows almost to the floor, and those
+preposterous English window fasteners which a child could open.
+Behind there was nothing remarkable, save that the passage window
+could be reached from the top of the coach-house. I walked round
+it and examined it closely from every point of view, but without
+noting anything else of interest.
+
+"I then lounged down the street and found, as I expected, that
+there was a mews in a lane which runs down by one wall of the
+garden. I lent the ostlers a hand in rubbing down their horses,
+and received in exchange twopence, a glass of half and half, two
+fills of shag tobacco, and as much information as I could desire
+about Miss Adler, to say nothing of half a dozen other people in
+the neighbourhood in whom I was not in the least interested, but
+whose biographies I was compelled to listen to."
+
+"And what of Irene Adler?" I asked.
+
+"Oh, she has turned all the men's heads down in that part. She is
+the daintiest thing under a bonnet on this planet. So say the
+Serpentine-mews, to a man. She lives quietly, sings at concerts,
+drives out at five every day, and returns at seven sharp for
+dinner. Seldom goes out at other times, except when she sings.
+Has only one male visitor, but a good deal of him. He is dark,
+handsome, and dashing, never calls less than once a day, and
+often twice. He is a Mr. Godfrey Norton, of the Inner Temple. See
+the advantages of a cabman as a confidant. They had driven him
+home a dozen times from Serpentine-mews, and knew all about him.
+When I had listened to all they had to tell, I began to walk up
+and down near Briony Lodge once more, and to think over my plan
+of campaign.
+
+"This Godfrey Norton was evidently an important factor in the
+matter. He was a lawyer. That sounded ominous. What was the
+relation between them, and what the object of his repeated
+visits? Was she his client, his friend, or his mistress? If the
+former, she had probably transferred the photograph to his
+keeping. If the latter, it was less likely. On the issue of this
+question depended whether I should continue my work at Briony
+Lodge, or turn my attention to the gentleman's chambers in the
+Temple. It was a delicate point, and it widened the field of my
+inquiry. I fear that I bore you with these details, but I have to
+let you see my little difficulties, if you are to understand the
+situation."
+
+"I am following you closely," I answered.
+
+"I was still balancing the matter in my mind when a hansom cab
+drove up to Briony Lodge, and a gentleman sprang out. He was a
+remarkably handsome man, dark, aquiline, and moustached--evidently
+the man of whom I had heard. He appeared to be in a
+great hurry, shouted to the cabman to wait, and brushed past the
+maid who opened the door with the air of a man who was thoroughly
+at home.
+
+"He was in the house about half an hour, and I could catch
+glimpses of him in the windows of the sitting-room, pacing up and
+down, talking excitedly, and waving his arms. Of her I could see
+nothing. Presently he emerged, looking even more flurried than
+before. As he stepped up to the cab, he pulled a gold watch from
+his pocket and looked at it earnestly, 'Drive like the devil,' he
+shouted, 'first to Gross & Hankey's in Regent Street, and then to
+the Church of St. Monica in the Edgeware Road. Half a guinea if
+you do it in twenty minutes!'
+
+"Away they went, and I was just wondering whether I should not do
+well to follow them when up the lane came a neat little landau,
+the coachman with his coat only half-buttoned, and his tie under
+his ear, while all the tags of his harness were sticking out of
+the buckles. It hadn't pulled up before she shot out of the hall
+door and into it. I only caught a glimpse of her at the moment,
+but she was a lovely woman, with a face that a man might die for.
+
+"'The Church of St. Monica, John,' she cried, 'and half a
+sovereign if you reach it in twenty minutes.'
+
+"This was quite too good to lose, Watson. I was just balancing
+whether I should run for it, or whether I should perch behind her
+landau when a cab came through the street. The driver looked
+twice at such a shabby fare, but I jumped in before he could
+object. 'The Church of St. Monica,' said I, 'and half a sovereign
+if you reach it in twenty minutes.' It was twenty-five minutes to
+twelve, and of course it was clear enough what was in the wind.
+
+"My cabby drove fast. I don't think I ever drove faster, but the
+others were there before us. The cab and the landau with their
+steaming horses were in front of the door when I arrived. I paid
+the man and hurried into the church. There was not a soul there
+save the two whom I had followed and a surpliced clergyman, who
+seemed to be expostulating with them. They were all three
+standing in a knot in front of the altar. I lounged up the side
+aisle like any other idler who has dropped into a church.
+Suddenly, to my surprise, the three at the altar faced round to
+me, and Godfrey Norton came running as hard as he could towards
+me.
+
+"'Thank God,' he cried. 'You'll do. Come! Come!'
+
+"'What then?' I asked.
+
+"'Come, man, come, only three minutes, or it won't be legal.'
+
+"I was half-dragged up to the altar, and before I knew where I was
+I found myself mumbling responses which were whispered in my ear,
+and vouching for things of which I knew nothing, and generally
+assisting in the secure tying up of Irene Adler, spinster, to
+Godfrey Norton, bachelor. It was all done in an instant, and
+there was the gentleman thanking me on the one side and the lady
+on the other, while the clergyman beamed on me in front. It was
+the most preposterous position in which I ever found myself in my
+life, and it was the thought of it that started me laughing just
+now. It seems that there had been some informality about their
+license, that the clergyman absolutely refused to marry them
+without a witness of some sort, and that my lucky appearance
+saved the bridegroom from having to sally out into the streets in
+search of a best man. The bride gave me a sovereign, and I mean
+to wear it on my watch-chain in memory of the occasion."
+
+"This is a very unexpected turn of affairs," said I; "and what
+then?"
+
+"Well, I found my plans very seriously menaced. It looked as if
+the pair might take an immediate departure, and so necessitate
+very prompt and energetic measures on my part. At the church
+door, however, they separated, he driving back to the Temple, and
+she to her own house. 'I shall drive out in the park at five as
+usual,' she said as she left him. I heard no more. They drove
+away in different directions, and I went off to make my own
+arrangements."
+
+"Which are?"
+
+"Some cold beef and a glass of beer," he answered, ringing the
+bell. "I have been too busy to think of food, and I am likely to
+be busier still this evening. By the way, Doctor, I shall want
+your co-operation."
+
+"I shall be delighted."
+
+"You don't mind breaking the law?"
+
+"Not in the least."
+
+"Nor running a chance of arrest?"
+
+"Not in a good cause."
+
+"Oh, the cause is excellent!"
+
+"Then I am your man."
+
+"I was sure that I might rely on you."
+
+"But what is it you wish?"
+
+"When Mrs. Turner has brought in the tray I will make it clear to
+you. Now," he said as he turned hungrily on the simple fare that
+our landlady had provided, "I must discuss it while I eat, for I
+have not much time. It is nearly five now. In two hours we must
+be on the scene of action. Miss Irene, or Madame, rather, returns
+from her drive at seven. We must be at Briony Lodge to meet her."
+
+"And what then?"
+
+"You must leave that to me. I have already arranged what is to
+occur. There is only one point on which I must insist. You must
+not interfere, come what may. You understand?"
+
+"I am to be neutral?"
+
+"To do nothing whatever. There will probably be some small
+unpleasantness. Do not join in it. It will end in my being
+conveyed into the house. Four or five minutes afterwards the
+sitting-room window will open. You are to station yourself close
+to that open window."
+
+"Yes."
+
+"You are to watch me, for I will be visible to you."
+
+"Yes."
+
+"And when I raise my hand--so--you will throw into the room what
+I give you to throw, and will, at the same time, raise the cry of
+fire. You quite follow me?"
+
+"Entirely."
+
+"It is nothing very formidable," he said, taking a long cigar-shaped
+roll from his pocket. "It is an ordinary plumber's smoke-rocket,
+fitted with a cap at either end to make it self-lighting.
+Your task is confined to that. When you raise your cry of fire,
+it will be taken up by quite a number of people. You may then
+walk to the end of the street, and I will rejoin you in ten
+minutes. I hope that I have made myself clear?"
+
+"I am to remain neutral, to get near the window, to watch you,
+and at the signal to throw in this object, then to raise the cry
+of fire, and to wait you at the corner of the street."
+
+"Precisely."
+
+"Then you may entirely rely on me."
+
+"That is excellent. I think, perhaps, it is almost time that I
+prepare for the new role I have to play."
+
+He disappeared into his bedroom and returned in a few minutes in
+the character of an amiable and simple-minded Nonconformist
+clergyman. His broad black hat, his baggy trousers, his white
+tie, his sympathetic smile, and general look of peering and
+benevolent curiosity were such as Mr. John Hare alone could have
+equalled. It was not merely that Holmes changed his costume. His
+expression, his manner, his very soul seemed to vary with every
+fresh part that he assumed. The stage lost a fine actor, even as
+science lost an acute reasoner, when he became a specialist in
+crime.
+
+It was a quarter past six when we left Baker Street, and it still
+wanted ten minutes to the hour when we found ourselves in
+Serpentine Avenue. It was already dusk, and the lamps were just
+being lighted as we paced up and down in front of Briony Lodge,
+waiting for the coming of its occupant. The house was just such
+as I had pictured it from Sherlock Holmes' succinct description,
+but the locality appeared to be less private than I expected. On
+the contrary, for a small street in a quiet neighbourhood, it was
+remarkably animated. There was a group of shabbily dressed men
+smoking and laughing in a corner, a scissors-grinder with his
+wheel, two guardsmen who were flirting with a nurse-girl, and
+several well-dressed young men who were lounging up and down with
+cigars in their mouths.
+
+"You see," remarked Holmes, as we paced to and fro in front of
+the house, "this marriage rather simplifies matters. The
+photograph becomes a double-edged weapon now. The chances are
+that she would be as averse to its being seen by Mr. Godfrey
+Norton, as our client is to its coming to the eyes of his
+princess. Now the question is, Where are we to find the
+photograph?"
+
+"Where, indeed?"
+
+"It is most unlikely that she carries it about with her. It is
+cabinet size. Too large for easy concealment about a woman's
+dress. She knows that the King is capable of having her waylaid
+and searched. Two attempts of the sort have already been made. We
+may take it, then, that she does not carry it about with her."
+
+"Where, then?"
+
+"Her banker or her lawyer. There is that double possibility. But
+I am inclined to think neither. Women are naturally secretive,
+and they like to do their own secreting. Why should she hand it
+over to anyone else? She could trust her own guardianship, but
+she could not tell what indirect or political influence might be
+brought to bear upon a business man. Besides, remember that she
+had resolved to use it within a few days. It must be where she
+can lay her hands upon it. It must be in her own house."
+
+"But it has twice been burgled."
+
+"Pshaw! They did not know how to look."
+
+"But how will you look?"
+
+"I will not look."
+
+"What then?"
+
+"I will get her to show me."
+
+"But she will refuse."
+
+"She will not be able to. But I hear the rumble of wheels. It is
+her carriage. Now carry out my orders to the letter."
+
+As he spoke the gleam of the side-lights of a carriage came round
+the curve of the avenue. It was a smart little landau which
+rattled up to the door of Briony Lodge. As it pulled up, one of
+the loafing men at the corner dashed forward to open the door in
+the hope of earning a copper, but was elbowed away by another
+loafer, who had rushed up with the same intention. A fierce
+quarrel broke out, which was increased by the two guardsmen, who
+took sides with one of the loungers, and by the scissors-grinder,
+who was equally hot upon the other side. A blow was struck, and
+in an instant the lady, who had stepped from her carriage, was
+the centre of a little knot of flushed and struggling men, who
+struck savagely at each other with their fists and sticks. Holmes
+dashed into the crowd to protect the lady; but just as he reached
+her he gave a cry and dropped to the ground, with the blood
+running freely down his face. At his fall the guardsmen took to
+their heels in one direction and the loungers in the other, while
+a number of better-dressed people, who had watched the scuffle
+without taking part in it, crowded in to help the lady and to
+attend to the injured man. Irene Adler, as I will still call her,
+had hurried up the steps; but she stood at the top with her
+superb figure outlined against the lights of the hall, looking
+back into the street.
+
+"Is the poor gentleman much hurt?" she asked.
+
+"He is dead," cried several voices.
+
+"No, no, there's life in him!" shouted another. "But he'll be
+gone before you can get him to hospital."
+
+"He's a brave fellow," said a woman. "They would have had the
+lady's purse and watch if it hadn't been for him. They were a
+gang, and a rough one, too. Ah, he's breathing now."
+
+"He can't lie in the street. May we bring him in, marm?"
+
+"Surely. Bring him into the sitting-room. There is a comfortable
+sofa. This way, please!"
+
+Slowly and solemnly he was borne into Briony Lodge and laid out
+in the principal room, while I still observed the proceedings
+from my post by the window. The lamps had been lit, but the
+blinds had not been drawn, so that I could see Holmes as he lay
+upon the couch. I do not know whether he was seized with
+compunction at that moment for the part he was playing, but I
+know that I never felt more heartily ashamed of myself in my life
+than when I saw the beautiful creature against whom I was
+conspiring, or the grace and kindliness with which she waited
+upon the injured man. And yet it would be the blackest treachery
+to Holmes to draw back now from the part which he had intrusted
+to me. I hardened my heart, and took the smoke-rocket from under
+my ulster. After all, I thought, we are not injuring her. We are
+but preventing her from injuring another.
+
+Holmes had sat up upon the couch, and I saw him motion like a man
+who is in need of air. A maid rushed across and threw open the
+window. At the same instant I saw him raise his hand and at the
+signal I tossed my rocket into the room with a cry of "Fire!" The
+word was no sooner out of my mouth than the whole crowd of
+spectators, well dressed and ill--gentlemen, ostlers, and
+servant-maids--joined in a general shriek of "Fire!" Thick clouds
+of smoke curled through the room and out at the open window. I
+caught a glimpse of rushing figures, and a moment later the voice
+of Holmes from within assuring them that it was a false alarm.
+Slipping through the shouting crowd I made my way to the corner
+of the street, and in ten minutes was rejoiced to find my
+friend's arm in mine, and to get away from the scene of uproar.
+He walked swiftly and in silence for some few minutes until we
+had turned down one of the quiet streets which lead towards the
+Edgeware Road.
+
+"You did it very nicely, Doctor," he remarked. "Nothing could
+have been better. It is all right."
+
+"You have the photograph?"
+
+"I know where it is."
+
+"And how did you find out?"
+
+"She showed me, as I told you she would."
+
+"I am still in the dark."
+
+"I do not wish to make a mystery," said he, laughing. "The matter
+was perfectly simple. You, of course, saw that everyone in the
+street was an accomplice. They were all engaged for the evening."
+
+"I guessed as much."
+
+"Then, when the row broke out, I had a little moist red paint in
+the palm of my hand. I rushed forward, fell down, clapped my hand
+to my face, and became a piteous spectacle. It is an old trick."
+
+"That also I could fathom."
+
+"Then they carried me in. She was bound to have me in. What else
+could she do? And into her sitting-room, which was the very room
+which I suspected. It lay between that and her bedroom, and I was
+determined to see which. They laid me on a couch, I motioned for
+air, they were compelled to open the window, and you had your
+chance."
+
+"How did that help you?"
+
+"It was all-important. When a woman thinks that her house is on
+fire, her instinct is at once to rush to the thing which she
+values most. It is a perfectly overpowering impulse, and I have
+more than once taken advantage of it. In the case of the
+Darlington substitution scandal it was of use to me, and also in
+the Arnsworth Castle business. A married woman grabs at her baby;
+an unmarried one reaches for her jewel-box. Now it was clear to
+me that our lady of to-day had nothing in the house more precious
+to her than what we are in quest of. She would rush to secure it.
+The alarm of fire was admirably done. The smoke and shouting were
+enough to shake nerves of steel. She responded beautifully. The
+photograph is in a recess behind a sliding panel just above the
+right bell-pull. She was there in an instant, and I caught a
+glimpse of it as she half-drew it out. When I cried out that it
+was a false alarm, she replaced it, glanced at the rocket, rushed
+from the room, and I have not seen her since. I rose, and, making
+my excuses, escaped from the house. I hesitated whether to
+attempt to secure the photograph at once; but the coachman had
+come in, and as he was watching me narrowly it seemed safer to
+wait. A little over-precipitance may ruin all."
+
+"And now?" I asked.
+
+"Our quest is practically finished. I shall call with the King
+to-morrow, and with you, if you care to come with us. We will be
+shown into the sitting-room to wait for the lady, but it is
+probable that when she comes she may find neither us nor the
+photograph. It might be a satisfaction to his Majesty to regain
+it with his own hands."
+
+"And when will you call?"
+
+"At eight in the morning. She will not be up, so that we shall
+have a clear field. Besides, we must be prompt, for this marriage
+may mean a complete change in her life and habits. I must wire to
+the King without delay."
+
+We had reached Baker Street and had stopped at the door. He was
+searching his pockets for the key when someone passing said:
+
+"Good-night, Mister Sherlock Holmes."
+
+There were several people on the pavement at the time, but the
+greeting appeared to come from a slim youth in an ulster who had
+hurried by.
+
+"I've heard that voice before," said Holmes, staring down the
+dimly lit street. "Now, I wonder who the deuce that could have
+been."
+
+
+III.
+
+I slept at Baker Street that night, and we were engaged upon our
+toast and coffee in the morning when the King of Bohemia rushed
+into the room.
+
+"You have really got it!" he cried, grasping Sherlock Holmes by
+either shoulder and looking eagerly into his face.
+
+"Not yet."
+
+"But you have hopes?"
+
+"I have hopes."
+
+"Then, come. I am all impatience to be gone."
+
+"We must have a cab."
+
+"No, my brougham is waiting."
+
+"Then that will simplify matters." We descended and started off
+once more for Briony Lodge.
+
+"Irene Adler is married," remarked Holmes.
+
+"Married! When?"
+
+"Yesterday."
+
+"But to whom?"
+
+"To an English lawyer named Norton."
+
+"But she could not love him."
+
+"I am in hopes that she does."
+
+"And why in hopes?"
+
+"Because it would spare your Majesty all fear of future
+annoyance. If the lady loves her husband, she does not love your
+Majesty. If she does not love your Majesty, there is no reason
+why she should interfere with your Majesty's plan."
+
+"It is true. And yet--Well! I wish she had been of my own
+station! What a queen she would have made!" He relapsed into a
+moody silence, which was not broken until we drew up in
+Serpentine Avenue.
+
+The door of Briony Lodge was open, and an elderly woman stood
+upon the steps. She watched us with a sardonic eye as we stepped
+from the brougham.
+
+"Mr. Sherlock Holmes, I believe?" said she.
+
+"I am Mr. Holmes," answered my companion, looking at her with a
+questioning and rather startled gaze.
+
+"Indeed! My mistress told me that you were likely to call. She
+left this morning with her husband by the 5:15 train from Charing
+Cross for the Continent."
+
+"What!" Sherlock Holmes staggered back, white with chagrin and
+surprise. "Do you mean that she has left England?"
+
+"Never to return."
+
+"And the papers?" asked the King hoarsely. "All is lost."
+
+"We shall see." He pushed past the servant and rushed into the
+drawing-room, followed by the King and myself. The furniture was
+scattered about in every direction, with dismantled shelves and
+open drawers, as if the lady had hurriedly ransacked them before
+her flight. Holmes rushed at the bell-pull, tore back a small
+sliding shutter, and, plunging in his hand, pulled out a
+photograph and a letter. The photograph was of Irene Adler
+herself in evening dress, the letter was superscribed to
+"Sherlock Holmes, Esq. To be left till called for." My friend
+tore it open and we all three read it together. It was dated at
+midnight of the preceding night and ran in this way:
+
+"MY DEAR MR. SHERLOCK HOLMES,--You really did it very well. You
+took me in completely. Until after the alarm of fire, I had not a
+suspicion. But then, when I found how I had betrayed myself, I
+began to think. I had been warned against you months ago. I had
+been told that if the King employed an agent it would certainly
+be you. And your address had been given me. Yet, with all this,
+you made me reveal what you wanted to know. Even after I became
+suspicious, I found it hard to think evil of such a dear, kind
+old clergyman. But, you know, I have been trained as an actress
+myself. Male costume is nothing new to me. I often take advantage
+of the freedom which it gives. I sent John, the coachman, to
+watch you, ran up stairs, got into my walking-clothes, as I call
+them, and came down just as you departed.
+
+"Well, I followed you to your door, and so made sure that I was
+really an object of interest to the celebrated Mr. Sherlock
+Holmes. Then I, rather imprudently, wished you good-night, and
+started for the Temple to see my husband.
+
+"We both thought the best resource was flight, when pursued by
+so formidable an antagonist; so you will find the nest empty when
+you call to-morrow. As to the photograph, your client may rest in
+peace. I love and am loved by a better man than he. The King may
+do what he will without hindrance from one whom he has cruelly
+wronged. I keep it only to safeguard myself, and to preserve a
+weapon which will always secure me from any steps which he might
+take in the future. I leave a photograph which he might care to
+possess; and I remain, dear Mr. Sherlock Holmes,
+
+                                      "Very truly yours,
+                                   "IRENE NORTON, née ADLER."
+
+"What a woman--oh, what a woman!" cried the King of Bohemia, when
+we had all three read this epistle. "Did I not tell you how quick
+and resolute she was? Would she not have made an admirable queen?
+Is it not a pity that she was not on my level?"
+
+"From what I have seen of the lady she seems indeed to be on a
+very different level to your Majesty," said Holmes coldly. "I am
+sorry that I have not been able to bring your Majesty's business
+to a more successful conclusion."
+
+"On the contrary, my dear sir," cried the King; "nothing could be
+more successful. I know that her word is inviolate. The
+photograph is now as safe as if it were in the fire."
+
+"I am glad to hear your Majesty say so."
+
+"I am immensely indebted to you. Pray tell me in what way I can
+reward you. This ring--" He slipped an emerald snake ring from
+his finger and held it out upon the palm of his hand.
+
+"Your Majesty has something which I should value even more
+highly," said Holmes.
+
+"You have but to name it."
+
+"This photograph!"
+
+The King stared at him in amazement.
+
+"Irene's photograph!" he cried. "Certainly, if you wish it."
+
+"I thank your Majesty. Then there is no more to be done in the
+matter. I have the honour to wish you a very good-morning." He
+bowed, and, turning away without observing the hand which the
+King had stretched out to him, he set off in my company for his
+chambers.
+
+And that was how a great scandal threatened to affect the kingdom
+of Bohemia, and how the best plans of Mr. Sherlock Holmes were
+beaten by a woman's wit. He used to make merry over the
+cleverness of women, but I have not heard him do it of late. And
+when he speaks of Irene Adler, or when he refers to her
+photograph, it is always under the honourable title of the woman.
+
+
+
+ADVENTURE II. THE RED-HEADED LEAGUE
+
+I had called upon my friend, Mr. Sherlock Holmes, one day in the
+autumn of last year and found him in deep conversation with a
+very stout, florid-faced, elderly gentleman with fiery red hair.
+With an apology for my intrusion, I was about to withdraw when
+Holmes pulled me abruptly into the room and closed the door
+behind me.
+
+"You could not possibly have come at a better time, my dear
+Watson," he said cordially.
+
+"I was afraid that you were engaged."
+
+"So I am. Very much so."
+
+"Then I can wait in the next room."
+
+"Not at all. This gentleman, Mr. Wilson, has been my partner and
+helper in many of my most successful cases, and I have no
+doubt that he will be of the utmost use to me in yours also."
+
+The stout gentleman half rose from his chair and gave a bob of
+greeting, with a quick little questioning glance from his small
+fat-encircled eyes.
+
+"Try the settee," said Holmes, relapsing into his armchair and
+putting his fingertips together, as was his custom when in
+judicial moods. "I know, my dear Watson, that you share my love
+of all that is bizarre and outside the conventions and humdrum
+routine of everyday life. You have shown your relish for it by
+the enthusiasm which has prompted you to chronicle, and, if you
+will excuse my saying so, somewhat to embellish so many of my own
+little adventures."
+
+"Your cases have indeed been of the greatest interest to me," I
+observed.
+
+"You will remember that I remarked the other day, just before we
+went into the very simple problem presented by Miss Mary
+Sutherland, that for strange effects and extraordinary
+combinations we must go to life itself, which is always far more
+daring than any effort of the imagination."
+
+"A proposition which I took the liberty of doubting."
+
+"You did, Doctor, but none the less you must come round to my
+view, for otherwise I shall keep on piling fact upon fact on you
+until your reason breaks down under them and acknowledges me to
+be right. Now, Mr. Jabez Wilson here has been good enough to call
+upon me this morning, and to begin a narrative which promises to
+be one of the most singular which I have listened to for some
+time. You have heard me remark that the strangest and most unique
+things are very often connected not with the larger but with the
+smaller crimes, and occasionally, indeed, where there is room for
+doubt whether any positive crime has been committed. As far as I
+have heard it is impossible for me to say whether the present
+case is an instance of crime or not, but the course of events is
+certainly among the most singular that I have ever listened to.
+Perhaps, Mr. Wilson, you would have the great kindness to
+recommence your narrative. I ask you not merely because my friend
+Dr. Watson has not heard the opening part but also because the
+peculiar nature of the story makes me anxious to have every
+possible detail from your lips. As a rule, when I have heard some
+slight indication of the course of events, I am able to guide
+myself by the thousands of other similar cases which occur to my
+memory. In the present instance I am forced to admit that the
+facts are, to the best of my belief, unique."
+
+The portly client puffed out his chest with an appearance of some
+little pride and pulled a dirty and wrinkled newspaper from the
+inside pocket of his greatcoat. As he glanced down the
+advertisement column, with his head thrust forward and the paper
+flattened out upon his knee, I took a good look at the man and
+endeavoured, after the fashion of my companion, to read the
+indications which might be presented by his dress or appearance.
+
+I did not gain very much, however, by my inspection. Our visitor
+bore every mark of being an average commonplace British
+tradesman, obese, pompous, and slow. He wore rather baggy grey
+shepherd's check trousers, a not over-clean black frock-coat,
+unbuttoned in the front, and a drab waistcoat with a heavy brassy
+Albert chain, and a square pierced bit of metal dangling down as
+an ornament. A frayed top-hat and a faded brown overcoat with a
+wrinkled velvet collar lay upon a chair beside him. Altogether,
+look as I would, there was nothing remarkable about the man save
+his blazing red head, and the expression of extreme chagrin and
+discontent upon his features.
+
+Sherlock Holmes' quick eye took in my occupation, and he shook
+his head with a smile as he noticed my questioning glances.
+"Beyond the obvious facts that he has at some time done manual
+labour, that he takes snuff, that he is a Freemason, that he has
+been in China, and that he has done a considerable amount of
+writing lately, I can deduce nothing else."
+
+Mr. Jabez Wilson started up in his chair, with his forefinger
+upon the paper, but his eyes upon my companion.
+
+"How, in the name of good-fortune, did you know all that, Mr.
+Holmes?" he asked. "How did you know, for example, that I did
+manual labour. It's as true as gospel, for I began as a ship's
+carpenter."
+
+"Your hands, my dear sir. Your right hand is quite a size larger
+than your left. You have worked with it, and the muscles are more
+developed."
+
+"Well, the snuff, then, and the Freemasonry?"
+
+"I won't insult your intelligence by telling you how I read that,
+especially as, rather against the strict rules of your order, you
+use an arc-and-compass breastpin."
+
+"Ah, of course, I forgot that. But the writing?"
+
+"What else can be indicated by that right cuff so very shiny for
+five inches, and the left one with the smooth patch near the
+elbow where you rest it upon the desk?"
+
+"Well, but China?"
+
+"The fish that you have tattooed immediately above your right
+wrist could only have been done in China. I have made a small
+study of tattoo marks and have even contributed to the literature
+of the subject. That trick of staining the fishes' scales of a
+delicate pink is quite peculiar to China. When, in addition, I
+see a Chinese coin hanging from your watch-chain, the matter
+becomes even more simple."
+
+Mr. Jabez Wilson laughed heavily. "Well, I never!" said he. "I
+thought at first that you had done something clever, but I see
+that there was nothing in it, after all."
+
+"I begin to think, Watson," said Holmes, "that I make a mistake
+in explaining. 'Omne ignotum pro magnifico,' you know, and my
+poor little reputation, such as it is, will suffer shipwreck if I
+am so candid. Can you not find the advertisement, Mr. Wilson?"
+
+"Yes, I have got it now," he answered with his thick red finger
+planted halfway down the column. "Here it is. This is what began
+it all. You just read it for yourself, sir."
+
+I took the paper from him and read as follows:
+
+"TO THE RED-HEADED LEAGUE: On account of the bequest of the late
+Ezekiah Hopkins, of Lebanon, Pennsylvania, U. S. A., there is now
+another vacancy open which entitles a member of the League to a
+salary of 4 pounds a week for purely nominal services. All
+red-headed men who are sound in body and mind and above the age
+of twenty-one years, are eligible. Apply in person on Monday, at
+eleven o'clock, to Duncan Ross, at the offices of the League, 7
+Pope's Court, Fleet Street."
+
+"What on earth does this mean?" I ejaculated after I had twice
+read over the extraordinary announcement.
+
+Holmes chuckled and wriggled in his chair, as was his habit when
+in high spirits. "It is a little off the beaten track, isn't it?"
+said he. "And now, Mr. Wilson, off you go at scratch and tell us
+all about yourself, your household, and the effect which this
+advertisement had upon your fortunes. You will first make a note,
+Doctor, of the paper and the date."
+
+"It is The Morning Chronicle of April 27, 1890. Just two months
+ago."
+
+"Very good. Now, Mr. Wilson?"
+
+"Well, it is just as I have been telling you, Mr. Sherlock
+Holmes," said Jabez Wilson, mopping his forehead; "I have a small
+pawnbroker's business at Coburg Square, near the City. It's not a
+very large affair, and of late years it has not done more than
+just give me a living. I used to be able to keep two assistants,
+but now I only keep one; and I would have a job to pay him but
+that he is willing to come for half wages so as to learn the
+business."
+
+"What is the name of this obliging youth?" asked Sherlock Holmes.
+
+"His name is Vincent Spaulding, and he's not such a youth,
+either. It's hard to say his age. I should not wish a smarter
+assistant, Mr. Holmes; and I know very well that he could better
+himself and earn twice what I am able to give him. But, after
+all, if he is satisfied, why should I put ideas in his head?"
+
+"Why, indeed? You seem most fortunate in having an employ\ufffd who
+comes under the full market price. It is not a common experience
+among employers in this age. I don't know that your assistant is
+not as remarkable as your advertisement."
+
+"Oh, he has his faults, too," said Mr. Wilson. "Never was such a
+fellow for photography. Snapping away with a camera when he ought
+to be improving his mind, and then diving down into the cellar
+like a rabbit into its hole to develop his pictures. That is his
+main fault, but on the whole he's a good worker. There's no vice
+in him."
+
+"He is still with you, I presume?"
+
+"Yes, sir. He and a girl of fourteen, who does a bit of simple
+cooking and keeps the place clean--that's all I have in the
+house, for I am a widower and never had any family. We live very
+quietly, sir, the three of us; and we keep a roof over our heads
+and pay our debts, if we do nothing more.
+
+"The first thing that put us out was that advertisement.
+Spaulding, he came down into the office just this day eight
+weeks, with this very paper in his hand, and he says:
+
+"'I wish to the Lord, Mr. Wilson, that I was a red-headed man.'
+
+"'Why that?' I asks.
+
+"'Why,' says he, 'here's another vacancy on the League of the
+Red-headed Men. It's worth quite a little fortune to any man who
+gets it, and I understand that there are more vacancies than
+there are men, so that the trustees are at their wits' end what
+to do with the money. If my hair would only change colour, here's
+a nice little crib all ready for me to step into.'
+
+"'Why, what is it, then?' I asked. You see, Mr. Holmes, I am a
+very stay-at-home man, and as my business came to me instead of
+my having to go to it, I was often weeks on end without putting
+my foot over the door-mat. In that way I didn't know much of what
+was going on outside, and I was always glad of a bit of news.
+
+"'Have you never heard of the League of the Red-headed Men?' he
+asked with his eyes open.
+
+"'Never.'
+
+"'Why, I wonder at that, for you are eligible yourself for one
+of the vacancies.'
+
+"'And what are they worth?' I asked.
+
+"'Oh, merely a couple of hundred a year, but the work is slight,
+and it need not interfere very much with one's other
+occupations.'
+
+"Well, you can easily think that that made me prick up my ears,
+for the business has not been over-good for some years, and an
+extra couple of hundred would have been very handy.
+
+"'Tell me all about it,' said I.
+
+"'Well,' said he, showing me the advertisement, 'you can see for
+yourself that the League has a vacancy, and there is the address
+where you should apply for particulars. As far as I can make out,
+the League was founded by an American millionaire, Ezekiah
+Hopkins, who was very peculiar in his ways. He was himself
+red-headed, and he had a great sympathy for all red-headed men;
+so when he died it was found that he had left his enormous
+fortune in the hands of trustees, with instructions to apply the
+interest to the providing of easy berths to men whose hair is of
+that colour. From all I hear it is splendid pay and very little to
+do.'
+
+"'But,' said I, 'there would be millions of red-headed men who
+would apply.'
+
+"'Not so many as you might think,' he answered. 'You see it is
+really confined to Londoners, and to grown men. This American had
+started from London when he was young, and he wanted to do the
+old town a good turn. Then, again, I have heard it is no use your
+applying if your hair is light red, or dark red, or anything but
+real bright, blazing, fiery red. Now, if you cared to apply, Mr.
+Wilson, you would just walk in; but perhaps it would hardly be
+worth your while to put yourself out of the way for the sake of a
+few hundred pounds.'
+
+"Now, it is a fact, gentlemen, as you may see for yourselves,
+that my hair is of a very full and rich tint, so that it seemed
+to me that if there was to be any competition in the matter I
+stood as good a chance as any man that I had ever met. Vincent
+Spaulding seemed to know so much about it that I thought he might
+prove useful, so I just ordered him to put up the shutters for
+the day and to come right away with me. He was very willing to
+have a holiday, so we shut the business up and started off for
+the address that was given us in the advertisement.
+
+"I never hope to see such a sight as that again, Mr. Holmes. From
+north, south, east, and west every man who had a shade of red in
+his hair had tramped into the city to answer the advertisement.
+Fleet Street was choked with red-headed folk, and Pope's Court
+looked like a coster's orange barrow. I should not have thought
+there were so many in the whole country as were brought together
+by that single advertisement. Every shade of colour they
+were--straw, lemon, orange, brick, Irish-setter, liver, clay;
+but, as Spaulding said, there were not many who had the real
+vivid flame-coloured tint. When I saw how many were waiting, I
+would have given it up in despair; but Spaulding would not hear
+of it. How he did it I could not imagine, but he pushed and
+pulled and butted until he got me through the crowd, and right up
+to the steps which led to the office. There was a double stream
+upon the stair, some going up in hope, and some coming back
+dejected; but we wedged in as well as we could and soon found
+ourselves in the office."
+
+"Your experience has been a most entertaining one," remarked
+Holmes as his client paused and refreshed his memory with a huge
+pinch of snuff. "Pray continue your very interesting statement."
+
+"There was nothing in the office but a couple of wooden chairs
+and a deal table, behind which sat a small man with a head that
+was even redder than mine. He said a few words to each candidate
+as he came up, and then he always managed to find some fault in
+them which would disqualify them. Getting a vacancy did not seem
+to be such a very easy matter, after all. However, when our turn
+came the little man was much more favourable to me than to any of
+the others, and he closed the door as we entered, so that he
+might have a private word with us.
+
+"'This is Mr. Jabez Wilson,' said my assistant, 'and he is
+willing to fill a vacancy in the League.'
+
+"'And he is admirably suited for it,' the other answered. 'He has
+every requirement. I cannot recall when I have seen anything so
+fine.' He took a step backward, cocked his head on one side, and
+gazed at my hair until I felt quite bashful. Then suddenly he
+plunged forward, wrung my hand, and congratulated me warmly on my
+success.
+
+"'It would be injustice to hesitate,' said he. 'You will,
+however, I am sure, excuse me for taking an obvious precaution.'
+With that he seized my hair in both his hands, and tugged until I
+yelled with the pain. 'There is water in your eyes,' said he as
+he released me. 'I perceive that all is as it should be. But we
+have to be careful, for we have twice been deceived by wigs and
+once by paint. I could tell you tales of cobbler's wax which
+would disgust you with human nature.' He stepped over to the
+window and shouted through it at the top of his voice that the
+vacancy was filled. A groan of disappointment came up from below,
+and the folk all trooped away in different directions until there
+was not a red-head to be seen except my own and that of the
+manager.
+
+"'My name,' said he, 'is Mr. Duncan Ross, and I am myself one of
+the pensioners upon the fund left by our noble benefactor. Are
+you a married man, Mr. Wilson? Have you a family?'
+
+"I answered that I had not.
+
+"His face fell immediately.
+
+"'Dear me!' he said gravely, 'that is very serious indeed! I am
+sorry to hear you say that. The fund was, of course, for the
+propagation and spread of the red-heads as well as for their
+maintenance. It is exceedingly unfortunate that you should be a
+bachelor.'
+
+"My face lengthened at this, Mr. Holmes, for I thought that I was
+not to have the vacancy after all; but after thinking it over for
+a few minutes he said that it would be all right.
+
+"'In the case of another,' said he, 'the objection might be
+fatal, but we must stretch a point in favour of a man with such a
+head of hair as yours. When shall you be able to enter upon your
+new duties?'
+
+"'Well, it is a little awkward, for I have a business already,'
+said I.
+
+"'Oh, never mind about that, Mr. Wilson!' said Vincent Spaulding.
+'I should be able to look after that for you.'
+
+"'What would be the hours?' I asked.
+
+"'Ten to two.'
+
+"Now a pawnbroker's business is mostly done of an evening, Mr.
+Holmes, especially Thursday and Friday evening, which is just
+before pay-day; so it would suit me very well to earn a little in
+the mornings. Besides, I knew that my assistant was a good man,
+and that he would see to anything that turned up.
+
+"'That would suit me very well,' said I. 'And the pay?'
+
+"'Is 4 pounds a week.'
+
+"'And the work?'
+
+"'Is purely nominal.'
+
+"'What do you call purely nominal?'
+
+"'Well, you have to be in the office, or at least in the
+building, the whole time. If you leave, you forfeit your whole
+position forever. The will is very clear upon that point. You
+don't comply with the conditions if you budge from the office
+during that time.'
+
+"'It's only four hours a day, and I should not think of leaving,'
+said I.
+
+"'No excuse will avail,' said Mr. Duncan Ross; 'neither sickness
+nor business nor anything else. There you must stay, or you lose
+your billet.'
+
+"'And the work?'
+
+"'Is to copy out the "Encyclopaedia Britannica." There is the first
+volume of it in that press. You must find your own ink, pens, and
+blotting-paper, but we provide this table and chair. Will you be
+ready to-morrow?'
+
+"'Certainly,' I answered.
+
+"'Then, good-bye, Mr. Jabez Wilson, and let me congratulate you
+once more on the important position which you have been fortunate
+enough to gain.' He bowed me out of the room and I went home with
+my assistant, hardly knowing what to say or do, I was so pleased
+at my own good fortune.
+
+"Well, I thought over the matter all day, and by evening I was in
+low spirits again; for I had quite persuaded myself that the
+whole affair must be some great hoax or fraud, though what its
+object might be I could not imagine. It seemed altogether past
+belief that anyone could make such a will, or that they would pay
+such a sum for doing anything so simple as copying out the
+'Encyclopaedia Britannica.' Vincent Spaulding did what he could to
+cheer me up, but by bedtime I had reasoned myself out of the
+whole thing. However, in the morning I determined to have a look
+at it anyhow, so I bought a penny bottle of ink, and with a
+quill-pen, and seven sheets of foolscap paper, I started off for
+Pope's Court.
+
+"Well, to my surprise and delight, everything was as right as
+possible. The table was set out ready for me, and Mr. Duncan Ross
+was there to see that I got fairly to work. He started me off
+upon the letter A, and then he left me; but he would drop in from
+time to time to see that all was right with me. At two o'clock he
+bade me good-day, complimented me upon the amount that I had
+written, and locked the door of the office after me.
+
+"This went on day after day, Mr. Holmes, and on Saturday the
+manager came in and planked down four golden sovereigns for my
+week's work. It was the same next week, and the same the week
+after. Every morning I was there at ten, and every afternoon I
+left at two. By degrees Mr. Duncan Ross took to coming in only
+once of a morning, and then, after a time, he did not come in at
+all. Still, of course, I never dared to leave the room for an
+instant, for I was not sure when he might come, and the billet
+was such a good one, and suited me so well, that I would not risk
+the loss of it.
+
+"Eight weeks passed away like this, and I had written about
+Abbots and Archery and Armour and Architecture and Attica, and
+hoped with diligence that I might get on to the B's before very
+long. It cost me something in foolscap, and I had pretty nearly
+filled a shelf with my writings. And then suddenly the whole
+business came to an end."
+
+"To an end?"
+
+"Yes, sir. And no later than this morning. I went to my work as
+usual at ten o'clock, but the door was shut and locked, with a
+little square of cardboard hammered on to the middle of the
+panel with a tack. Here it is, and you can read for yourself."
+
+He held up a piece of white cardboard about the size of a sheet
+of note-paper. It read in this fashion:
+
+                  THE RED-HEADED LEAGUE
+
+                           IS
+
+                        DISSOLVED.
+
+                     October 9, 1890.
+
+Sherlock Holmes and I surveyed this curt announcement and the
+rueful face behind it, until the comical side of the affair so
+completely overtopped every other consideration that we both
+burst out into a roar of laughter.
+
+"I cannot see that there is anything very funny," cried our
+client, flushing up to the roots of his flaming head. "If you can
+do nothing better than laugh at me, I can go elsewhere."
+
+"No, no," cried Holmes, shoving him back into the chair from
+which he had half risen. "I really wouldn't miss your case for
+the world. It is most refreshingly unusual. But there is, if you
+will excuse my saying so, something just a little funny about it.
+Pray what steps did you take when you found the card upon the
+door?"
+
+"I was staggered, sir. I did not know what to do. Then I called
+at the offices round, but none of them seemed to know anything
+about it. Finally, I went to the landlord, who is an accountant
+living on the ground-floor, and I asked him if he could tell me
+what had become of the Red-headed League. He said that he had
+never heard of any such body. Then I asked him who Mr. Duncan
+Ross was. He answered that the name was new to him.
+
+"'Well,' said I, 'the gentleman at No. 4.'
+
+"'What, the red-headed man?'
+
+"'Yes.'
+
+"'Oh,' said he, 'his name was William Morris. He was a solicitor
+and was using my room as a temporary convenience until his new
+premises were ready. He moved out yesterday.'
+
+"'Where could I find him?'
+
+"'Oh, at his new offices. He did tell me the address. Yes, 17
+King Edward Street, near St. Paul's.'
+
+"I started off, Mr. Holmes, but when I got to that address it was
+a manufactory of artificial knee-caps, and no one in it had ever
+heard of either Mr. William Morris or Mr. Duncan Ross."
+
+"And what did you do then?" asked Holmes.
+
+"I went home to Saxe-Coburg Square, and I took the advice of my
+assistant. But he could not help me in any way. He could only say
+that if I waited I should hear by post. But that was not quite
+good enough, Mr. Holmes. I did not wish to lose such a place
+without a struggle, so, as I had heard that you were good enough
+to give advice to poor folk who were in need of it, I came right
+away to you."
+
+"And you did very wisely," said Holmes. "Your case is an
+exceedingly remarkable one, and I shall be happy to look into it.
+From what you have told me I think that it is possible that
+graver issues hang from it than might at first sight appear."
+
+"Grave enough!" said Mr. Jabez Wilson. "Why, I have lost four
+pound a week."
+
+"As far as you are personally concerned," remarked Holmes, "I do
+not see that you have any grievance against this extraordinary
+league. On the contrary, you are, as I understand, richer by some
+30 pounds, to say nothing of the minute knowledge which you have
+gained on every subject which comes under the letter A. You have
+lost nothing by them."
+
+"No, sir. But I want to find out about them, and who they are,
+and what their object was in playing this prank--if it was a
+prank--upon me. It was a pretty expensive joke for them, for it
+cost them two and thirty pounds."
+
+"We shall endeavour to clear up these points for you. And, first,
+one or two questions, Mr. Wilson. This assistant of yours who
+first called your attention to the advertisement--how long had he
+been with you?"
+
+"About a month then."
+
+"How did he come?"
+
+"In answer to an advertisement."
+
+"Was he the only applicant?"
+
+"No, I had a dozen."
+
+"Why did you pick him?"
+
+"Because he was handy and would come cheap."
+
+"At half-wages, in fact."
+
+"Yes."
+
+"What is he like, this Vincent Spaulding?"
+
+"Small, stout-built, very quick in his ways, no hair on his face,
+though he's not short of thirty. Has a white splash of acid upon
+his forehead."
+
+Holmes sat up in his chair in considerable excitement. "I thought
+as much," said he. "Have you ever observed that his ears are
+pierced for earrings?"
+
+"Yes, sir. He told me that a gipsy had done it for him when he
+was a lad."
+
+"Hum!" said Holmes, sinking back in deep thought. "He is still
+with you?"
+
+"Oh, yes, sir; I have only just left him."
+
+"And has your business been attended to in your absence?"
+
+"Nothing to complain of, sir. There's never very much to do of a
+morning."
+
+"That will do, Mr. Wilson. I shall be happy to give you an
+opinion upon the subject in the course of a day or two. To-day is
+Saturday, and I hope that by Monday we may come to a conclusion."
+
+"Well, Watson," said Holmes when our visitor had left us, "what
+do you make of it all?"
+
+"I make nothing of it," I answered frankly. "It is a most
+mysterious business."
+
+"As a rule," said Holmes, "the more bizarre a thing is the less
+mysterious it proves to be. It is your commonplace, featureless
+crimes which are really puzzling, just as a commonplace face is
+the most difficult to identify. But I must be prompt over this
+matter."
+
+"What are you going to do, then?" I asked.
+
+"To smoke," he answered. "It is quite a three pipe problem, and I
+beg that you won't speak to me for fifty minutes." He curled
+himself up in his chair, with his thin knees drawn up to his
+hawk-like nose, and there he sat with his eyes closed and his
+black clay pipe thrusting out like the bill of some strange bird.
+I had come to the conclusion that he had dropped asleep, and
+indeed was nodding myself, when he suddenly sprang out of his
+chair with the gesture of a man who has made up his mind and put
+his pipe down upon the mantelpiece.
+
+"Sarasate plays at the St. James's Hall this afternoon," he
+remarked. "What do you think, Watson? Could your patients spare
+you for a few hours?"
+
+"I have nothing to do to-day. My practice is never very
+absorbing."
+
+"Then put on your hat and come. I am going through the City
+first, and we can have some lunch on the way. I observe that
+there is a good deal of German music on the programme, which is
+rather more to my taste than Italian or French. It is
+introspective, and I want to introspect. Come along!"
+
+We travelled by the Underground as far as Aldersgate; and a short
+walk took us to Saxe-Coburg Square, the scene of the singular
+story which we had listened to in the morning. It was a poky,
+little, shabby-genteel place, where four lines of dingy
+two-storied brick houses looked out into a small railed-in
+enclosure, where a lawn of weedy grass and a few clumps of faded
+laurel-bushes made a hard fight against a smoke-laden and
+uncongenial atmosphere. Three gilt balls and a brown board with
+"JABEZ WILSON" in white letters, upon a corner house, announced
+the place where our red-headed client carried on his business.
+Sherlock Holmes stopped in front of it with his head on one side
+and looked it all over, with his eyes shining brightly between
+puckered lids. Then he walked slowly up the street, and then down
+again to the corner, still looking keenly at the houses. Finally
+he returned to the pawnbroker's, and, having thumped vigorously
+upon the pavement with his stick two or three times, he went up
+to the door and knocked. It was instantly opened by a
+bright-looking, clean-shaven young fellow, who asked him to step
+in.
+
+"Thank you," said Holmes, "I only wished to ask you how you would
+go from here to the Strand."
+
+"Third right, fourth left," answered the assistant promptly,
+closing the door.
+
+"Smart fellow, that," observed Holmes as we walked away. "He is,
+in my judgment, the fourth smartest man in London, and for daring
+I am not sure that he has not a claim to be third. I have known
+something of him before."
+
+"Evidently," said I, "Mr. Wilson's assistant counts for a good
+deal in this mystery of the Red-headed League. I am sure that you
+inquired your way merely in order that you might see him."
+
+"Not him."
+
+"What then?"
+
+"The knees of his trousers."
+
+"And what did you see?"
+
+"What I expected to see."
+
+"Why did you beat the pavement?"
+
+"My dear doctor, this is a time for observation, not for talk. We
+are spies in an enemy's country. We know something of Saxe-Coburg
+Square. Let us now explore the parts which lie behind it."
+
+The road in which we found ourselves as we turned round the
+corner from the retired Saxe-Coburg Square presented as great a
+contrast to it as the front of a picture does to the back. It was
+one of the main arteries which conveyed the traffic of the City
+to the north and west. The roadway was blocked with the immense
+stream of commerce flowing in a double tide inward and outward,
+while the footpaths were black with the hurrying swarm of
+pedestrians. It was difficult to realise as we looked at the line
+of fine shops and stately business premises that they really
+abutted on the other side upon the faded and stagnant square
+which we had just quitted.
+
+"Let me see," said Holmes, standing at the corner and glancing
+along the line, "I should like just to remember the order of the
+houses here. It is a hobby of mine to have an exact knowledge of
+London. There is Mortimer's, the tobacconist, the little
+newspaper shop, the Coburg branch of the City and Suburban Bank,
+the Vegetarian Restaurant, and McFarlane's carriage-building
+depot. That carries us right on to the other block. And now,
+Doctor, we've done our work, so it's time we had some play. A
+sandwich and a cup of coffee, and then off to violin-land, where
+all is sweetness and delicacy and harmony, and there are no
+red-headed clients to vex us with their conundrums."
+
+My friend was an enthusiastic musician, being himself not only a
+very capable performer but a composer of no ordinary merit. All
+the afternoon he sat in the stalls wrapped in the most perfect
+happiness, gently waving his long, thin fingers in time to the
+music, while his gently smiling face and his languid, dreamy eyes
+were as unlike those of Holmes the sleuth-hound, Holmes the
+relentless, keen-witted, ready-handed criminal agent, as it was
+possible to conceive. In his singular character the dual nature
+alternately asserted itself, and his extreme exactness and
+astuteness represented, as I have often thought, the reaction
+against the poetic and contemplative mood which occasionally
+predominated in him. The swing of his nature took him from
+extreme languor to devouring energy; and, as I knew well, he was
+never so truly formidable as when, for days on end, he had been
+lounging in his armchair amid his improvisations and his
+black-letter editions. Then it was that the lust of the chase
+would suddenly come upon him, and that his brilliant reasoning
+power would rise to the level of intuition, until those who were
+unacquainted with his methods would look askance at him as on a
+man whose knowledge was not that of other mortals. When I saw him
+that afternoon so enwrapped in the music at St. James's Hall I
+felt that an evil time might be coming upon those whom he had set
+himself to hunt down.
+
+"You want to go home, no doubt, Doctor," he remarked as we
+emerged.
+
+"Yes, it would be as well."
+
+"And I have some business to do which will take some hours. This
+business at Coburg Square is serious."
+
+"Why serious?"
+
+"A considerable crime is in contemplation. I have every reason to
+believe that we shall be in time to stop it. But to-day being
+Saturday rather complicates matters. I shall want your help
+to-night."
+
+"At what time?"
+
+"Ten will be early enough."
+
+"I shall be at Baker Street at ten."
+
+"Very well. And, I say, Doctor, there may be some little danger,
+so kindly put your army revolver in your pocket." He waved his
+hand, turned on his heel, and disappeared in an instant among the
+crowd.
+
+I trust that I am not more dense than my neighbours, but I was
+always oppressed with a sense of my own stupidity in my dealings
+with Sherlock Holmes. Here I had heard what he had heard, I had
+seen what he had seen, and yet from his words it was evident that
+he saw clearly not only what had happened but what was about to
+happen, while to me the whole business was still confused and
+grotesque. As I drove home to my house in Kensington I thought
+over it all, from the extraordinary story of the red-headed
+copier of the "Encyclopaedia" down to the visit to Saxe-Coburg
+Square, and the ominous words with which he had parted from me.
+What was this nocturnal expedition, and why should I go armed?
+Where were we going, and what were we to do? I had the hint from
+Holmes that this smooth-faced pawnbroker's assistant was a
+formidable man--a man who might play a deep game. I tried to
+puzzle it out, but gave it up in despair and set the matter aside
+until night should bring an explanation.
+
+It was a quarter-past nine when I started from home and made my
+way across the Park, and so through Oxford Street to Baker
+Street. Two hansoms were standing at the door, and as I entered
+the passage I heard the sound of voices from above. On entering
+his room I found Holmes in animated conversation with two men,
+one of whom I recognised as Peter Jones, the official police
+agent, while the other was a long, thin, sad-faced man, with a
+very shiny hat and oppressively respectable frock-coat.
+
+"Ha! Our party is complete," said Holmes, buttoning up his
+pea-jacket and taking his heavy hunting crop from the rack.
+"Watson, I think you know Mr. Jones, of Scotland Yard? Let me
+introduce you to Mr. Merryweather, who is to be our companion in
+to-night's adventure."
+
+"We're hunting in couples again, Doctor, you see," said Jones in
+his consequential way. "Our friend here is a wonderful man for
+starting a chase. All he wants is an old dog to help him to do
+the running down."
+
+"I hope a wild goose may not prove to be the end of our chase,"
+observed Mr. Merryweather gloomily.
+
+"You may place considerable confidence in Mr. Holmes, sir," said
+the police agent loftily. "He has his own little methods, which
+are, if he won't mind my saying so, just a little too theoretical
+and fantastic, but he has the makings of a detective in him. It
+is not too much to say that once or twice, as in that business of
+the Sholto murder and the Agra treasure, he has been more nearly
+correct than the official force."
+
+"Oh, if you say so, Mr. Jones, it is all right," said the
+stranger with deference. "Still, I confess that I miss my rubber.
+It is the first Saturday night for seven-and-twenty years that I
+have not had my rubber."
+
+"I think you will find," said Sherlock Holmes, "that you will
+play for a higher stake to-night than you have ever done yet, and
+that the play will be more exciting. For you, Mr. Merryweather,
+the stake will be some 30,000 pounds; and for you, Jones, it will
+be the man upon whom you wish to lay your hands."
+
+"John Clay, the murderer, thief, smasher, and forger. He's a
+young man, Mr. Merryweather, but he is at the head of his
+profession, and I would rather have my bracelets on him than on
+any criminal in London. He's a remarkable man, is young John
+Clay. His grandfather was a royal duke, and he himself has been
+to Eton and Oxford. His brain is as cunning as his fingers, and
+though we meet signs of him at every turn, we never know where to
+find the man himself. He'll crack a crib in Scotland one week,
+and be raising money to build an orphanage in Cornwall the next.
+I've been on his track for years and have never set eyes on him
+yet."
+
+"I hope that I may have the pleasure of introducing you to-night.
+I've had one or two little turns also with Mr. John Clay, and I
+agree with you that he is at the head of his profession. It is
+past ten, however, and quite time that we started. If you two
+will take the first hansom, Watson and I will follow in the
+second."
+
+Sherlock Holmes was not very communicative during the long drive
+and lay back in the cab humming the tunes which he had heard in
+the afternoon. We rattled through an endless labyrinth of gas-lit
+streets until we emerged into Farrington Street.
+
+"We are close there now," my friend remarked. "This fellow
+Merryweather is a bank director, and personally interested in the
+matter. I thought it as well to have Jones with us also. He is
+not a bad fellow, though an absolute imbecile in his profession.
+He has one positive virtue. He is as brave as a bulldog and as
+tenacious as a lobster if he gets his claws upon anyone. Here we
+are, and they are waiting for us."
+
+We had reached the same crowded thoroughfare in which we had
+found ourselves in the morning. Our cabs were dismissed, and,
+following the guidance of Mr. Merryweather, we passed down a
+narrow passage and through a side door, which he opened for us.
+Within there was a small corridor, which ended in a very massive
+iron gate. This also was opened, and led down a flight of winding
+stone steps, which terminated at another formidable gate. Mr.
+Merryweather stopped to light a lantern, and then conducted us
+down a dark, earth-smelling passage, and so, after opening a
+third door, into a huge vault or cellar, which was piled all
+round with crates and massive boxes.
+
+"You are not very vulnerable from above," Holmes remarked as he
+held up the lantern and gazed about him.
+
+"Nor from below," said Mr. Merryweather, striking his stick upon
+the flags which lined the floor. "Why, dear me, it sounds quite
+hollow!" he remarked, looking up in surprise.
+
+"I must really ask you to be a little more quiet!" said Holmes
+severely. "You have already imperilled the whole success of our
+expedition. Might I beg that you would have the goodness to sit
+down upon one of those boxes, and not to interfere?"
+
+The solemn Mr. Merryweather perched himself upon a crate, with a
+very injured expression upon his face, while Holmes fell upon his
+knees upon the floor and, with the lantern and a magnifying lens,
+began to examine minutely the cracks between the stones. A few
+seconds sufficed to satisfy him, for he sprang to his feet again
+and put his glass in his pocket.
+
+"We have at least an hour before us," he remarked, "for they can
+hardly take any steps until the good pawnbroker is safely in bed.
+Then they will not lose a minute, for the sooner they do their
+work the longer time they will have for their escape. We are at
+present, Doctor--as no doubt you have divined--in the cellar of
+the City branch of one of the principal London banks. Mr.
+Merryweather is the chairman of directors, and he will explain to
+you that there are reasons why the more daring criminals of
+London should take a considerable interest in this cellar at
+present."
+
+"It is our French gold," whispered the direct

<TRUNCATED>

[45/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsWrapper.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsWrapper.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsWrapper.java
new file mode 100644
index 0000000..f4ee97f
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsWrapper.java
@@ -0,0 +1,552 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.igfs;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteFileSystem;
+import org.apache.ignite.IgniteIllegalStateException;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.igfs.IgfsBlockLocation;
+import org.apache.ignite.igfs.IgfsFile;
+import org.apache.ignite.igfs.IgfsPath;
+import org.apache.ignite.igfs.IgfsPathSummary;
+import org.apache.ignite.internal.processors.igfs.IgfsEx;
+import org.apache.ignite.internal.processors.igfs.IgfsHandshakeResponse;
+import org.apache.ignite.internal.processors.igfs.IgfsStatus;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.SB;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.jetbrains.annotations.Nullable;
+
+import static org.apache.ignite.IgniteState.STARTED;
+import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsEndpoint.LOCALHOST;
+import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_EMBED;
+import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM;
+import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_LOCAL_TCP;
+import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.parameter;
+
+/**
+ * Wrapper for IGFS server.
+ */
+public class HadoopIgfsWrapper implements HadoopIgfs {
+    /** Current delegate; replaced atomically on reconnect. */
+    private final AtomicReference<Delegate> delegateRef = new AtomicReference<>();
+
+    /** Authority. */
+    private final String authority;
+
+    /** Connection string. */
+    private final HadoopIgfsEndpoint endpoint;
+
+    /** Log directory. */
+    private final String logDir;
+
+    /** Configuration. */
+    private final Configuration conf;
+
+    /** Logger. */
+    private final Log log;
+
+    /** The user name this wrapper works on behalf of. */
+    private final String userName;
+
+    /**
+     * Constructor.
+     *
+     * @param authority Authority (connection string).
+     * @param logDir Log directory for server.
+     * @param conf Configuration.
+     * @param log Current logger.
+     * @param user User name this wrapper works on behalf of.
+     */
+    public HadoopIgfsWrapper(String authority, String logDir, Configuration conf, Log log, String user)
+        throws IOException {
+        try {
+            this.authority = authority;
+            this.endpoint = new HadoopIgfsEndpoint(authority);
+            this.logDir = logDir;
+            this.conf = conf;
+            this.log = log;
+            this.userName = user;
+        }
+        catch (IgniteCheckedException e) {
+            throw new IOException("Failed to parse endpoint: " + authority, e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgfsHandshakeResponse handshake(String logDir) throws IOException {
+        return withReconnectHandling(new FileSystemClosure<IgfsHandshakeResponse>() {
+            @Override public IgfsHandshakeResponse apply(HadoopIgfsEx hadoop,
+                IgfsHandshakeResponse hndResp) {
+                return hndResp;
+            }
+        });
+    }
+
+    /** {@inheritDoc} */
+    @Override public void close(boolean force) {
+        Delegate delegate = delegateRef.get();
+
+        if (delegate != null && delegateRef.compareAndSet(delegate, null))
+            delegate.close(force);
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgfsFile info(final IgfsPath path) throws IOException {
+        return withReconnectHandling(new FileSystemClosure<IgfsFile>() {
+            @Override public IgfsFile apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp)
+                throws IgniteCheckedException, IOException {
+                return hadoop.info(path);
+            }
+        }, path);
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgfsFile update(final IgfsPath path, final Map<String, String> props) throws IOException {
+        return withReconnectHandling(new FileSystemClosure<IgfsFile>() {
+            @Override public IgfsFile apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp)
+                throws IgniteCheckedException, IOException {
+                return hadoop.update(path, props);
+            }
+        }, path);
+    }
+
+    /** {@inheritDoc} */
+    @Override public Boolean setTimes(final IgfsPath path, final long accessTime, final long modificationTime)
+        throws IOException {
+        return withReconnectHandling(new FileSystemClosure<Boolean>() {
+            @Override public Boolean apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp)
+                throws IgniteCheckedException, IOException {
+                return hadoop.setTimes(path, accessTime, modificationTime);
+            }
+        }, path);
+    }
+
+    /** {@inheritDoc} */
+    @Override public Boolean rename(final IgfsPath src, final IgfsPath dest) throws IOException {
+        return withReconnectHandling(new FileSystemClosure<Boolean>() {
+            @Override public Boolean apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp)
+                throws IgniteCheckedException, IOException {
+                return hadoop.rename(src, dest);
+            }
+        }, src);
+    }
+
+    /** {@inheritDoc} */
+    @Override public Boolean delete(final IgfsPath path, final boolean recursive) throws IOException {
+        return withReconnectHandling(new FileSystemClosure<Boolean>() {
+            @Override public Boolean apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp)
+                throws IgniteCheckedException, IOException {
+                return hadoop.delete(path, recursive);
+            }
+        }, path);
+    }
+
+    /** {@inheritDoc} */
+    @Override public Collection<IgfsBlockLocation> affinity(final IgfsPath path, final long start,
+        final long len) throws IOException {
+        return withReconnectHandling(new FileSystemClosure<Collection<IgfsBlockLocation>>() {
+            @Override public Collection<IgfsBlockLocation> apply(HadoopIgfsEx hadoop,
+                IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException {
+                return hadoop.affinity(path, start, len);
+            }
+        }, path);
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgfsPathSummary contentSummary(final IgfsPath path) throws IOException {
+        return withReconnectHandling(new FileSystemClosure<IgfsPathSummary>() {
+            @Override public IgfsPathSummary apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp)
+                throws IgniteCheckedException, IOException {
+                return hadoop.contentSummary(path);
+            }
+        }, path);
+    }
+
+    /** {@inheritDoc} */
+    @Override public Boolean mkdirs(final IgfsPath path, final Map<String, String> props) throws IOException {
+        return withReconnectHandling(new FileSystemClosure<Boolean>() {
+            @Override public Boolean apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp)
+                throws IgniteCheckedException, IOException {
+                return hadoop.mkdirs(path, props);
+            }
+        }, path);
+    }
+
+    /** {@inheritDoc} */
+    @Override public Collection<IgfsFile> listFiles(final IgfsPath path) throws IOException {
+        return withReconnectHandling(new FileSystemClosure<Collection<IgfsFile>>() {
+            @Override public Collection<IgfsFile> apply(HadoopIgfsEx hadoop,
+                IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException {
+                return hadoop.listFiles(path);
+            }
+        }, path);
+    }
+
+    /** {@inheritDoc} */
+    @Override public Collection<IgfsPath> listPaths(final IgfsPath path) throws IOException {
+        return withReconnectHandling(new FileSystemClosure<Collection<IgfsPath>>() {
+            @Override public Collection<IgfsPath> apply(HadoopIgfsEx hadoop,
+                IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException {
+                return hadoop.listPaths(path);
+            }
+        }, path);
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgfsStatus fsStatus() throws IOException {
+        return withReconnectHandling(new FileSystemClosure<IgfsStatus>() {
+            @Override public IgfsStatus apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp)
+                throws IgniteCheckedException, IOException {
+                return hadoop.fsStatus();
+            }
+        });
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopIgfsStreamDelegate open(final IgfsPath path) throws IOException {
+        return withReconnectHandling(new FileSystemClosure<HadoopIgfsStreamDelegate>() {
+            @Override public HadoopIgfsStreamDelegate apply(HadoopIgfsEx hadoop,
+                IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException {
+                return hadoop.open(path);
+            }
+        }, path);
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopIgfsStreamDelegate open(final IgfsPath path, final int seqReadsBeforePrefetch)
+        throws IOException {
+        return withReconnectHandling(new FileSystemClosure<HadoopIgfsStreamDelegate>() {
+            @Override public HadoopIgfsStreamDelegate apply(HadoopIgfsEx hadoop,
+                IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException {
+                return hadoop.open(path, seqReadsBeforePrefetch);
+            }
+        }, path);
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopIgfsStreamDelegate create(final IgfsPath path, final boolean overwrite,
+        final boolean colocate, final int replication, final long blockSize, @Nullable final Map<String, String> props)
+        throws IOException {
+        return withReconnectHandling(new FileSystemClosure<HadoopIgfsStreamDelegate>() {
+            @Override public HadoopIgfsStreamDelegate apply(HadoopIgfsEx hadoop,
+                IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException {
+                return hadoop.create(path, overwrite, colocate, replication, blockSize, props);
+            }
+        }, path);
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopIgfsStreamDelegate append(final IgfsPath path, final boolean create,
+        @Nullable final Map<String, String> props) throws IOException {
+        return withReconnectHandling(new FileSystemClosure<HadoopIgfsStreamDelegate>() {
+            @Override public HadoopIgfsStreamDelegate apply(HadoopIgfsEx hadoop,
+                IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException {
+                return hadoop.append(path, create, props);
+            }
+        }, path);
+    }
+
+    /**
+     * Execute closure which is not path-specific.
+     *
+     * @param clo Closure.
+     * @return Result.
+     * @throws IOException If failed.
+     */
+    private <T> T withReconnectHandling(FileSystemClosure<T> clo) throws IOException {
+        return withReconnectHandling(clo, null);
+    }
+
+    /**
+     * Execute closure, reconnecting and retrying once on communication failure.
+     *
+     * @param clo Closure.
+     * @param path Path for exceptions.
+     * @return Result.
+     * @throws IOException If failed.
+     */
+    private <T> T withReconnectHandling(final FileSystemClosure<T> clo, @Nullable IgfsPath path)
+        throws IOException {
+        Exception err = null;
+
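+        // Up to two attempts: the first may use a cached (possibly stale) delegate;
+        // on a communication failure the delegate is dropped, so the second
+        // attempt reconnects from scratch.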
+        for (int i = 0; i < 2; i++) {
+            Delegate curDelegate = null;
+
+            boolean close = false;
+            boolean force = false;
+
+            try {
+                curDelegate = delegate();
+
+                assert curDelegate != null;
+
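+                // A delegate already marked as doomed is closed once this call completes.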
+                close = curDelegate.doomed;
+
+                return clo.apply(curDelegate.hadoop, curDelegate.hndResp);
+            }
+            catch (HadoopIgfsCommunicationException e) {
+                if (curDelegate != null && !curDelegate.doomed) {
+                    // Try getting rid of the faulty delegate ASAP.
+                    delegateRef.compareAndSet(curDelegate, null);
+
+                    close = true;
+                    force = true;
+                }
+
+                if (log.isDebugEnabled())
+                    log.debug("Failed to send message to a server: " + e);
+
+                err = e;
+            }
+            catch (IgniteCheckedException e) {
+                throw HadoopIgfsUtils.cast(e, path != null ? path.toString() : null);
+            }
+            finally {
+                if (close) {
+                    assert curDelegate != null;
+
+                    curDelegate.close(force);
+                }
+            }
+        }
+
+        List<Throwable> list = X.getThrowableList(err);
+
+        Throwable cause = list.get(list.size() - 1);
+
+        throw new IOException("Failed to communicate with IGFS: "
+            + (cause.getMessage() == null ? cause.toString() : cause.getMessage()), err);
+    }
+
+    /**
+     * Get delegate, creating it if needed. Connection modes are tried in order:
+     * in-process (embedded), shared memory, local TCP, remote TCP.
+     *
+     * @return Delegate.
+     */
+    private Delegate delegate() throws HadoopIgfsCommunicationException {
+        // These fields will contain possible exceptions from shmem and TCP endpoints.
+        Exception errShmem = null;
+        Exception errTcp = null;
+
+        // 1. If delegate is set, return it immediately.
+        Delegate curDelegate = delegateRef.get();
+
+        if (curDelegate != null)
+            return curDelegate;
+
+        // 2. Guess that we are in the same VM.
+        boolean skipInProc = parameter(conf, PARAM_IGFS_ENDPOINT_NO_EMBED, authority, false);
+
+        if (!skipInProc) {
+            IgfsEx igfs = getIgfsEx(endpoint.grid(), endpoint.igfs());
+
+            if (igfs != null) {
+                HadoopIgfsEx hadoop = null;
+
+                try {
+                    hadoop = new HadoopIgfsInProc(igfs, log, userName);
+
+                    curDelegate = new Delegate(hadoop, hadoop.handshake(logDir));
+                }
+                catch (IOException | IgniteCheckedException e) {
+                    if (e instanceof HadoopIgfsCommunicationException && hadoop != null)
+                        hadoop.close(true);
+
+                    if (log.isDebugEnabled())
+                        log.debug("Failed to connect to in-process IGFS, fallback to IPC mode.", e);
+                }
+            }
+        }
+
+        // 3. Try connecting using shmem.
+        boolean skipLocShmem = parameter(conf, PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM, authority, false);
+
+        if (curDelegate == null && !skipLocShmem && !U.isWindows()) {
+            HadoopIgfsEx hadoop = null;
+
+            try {
+                hadoop = new HadoopIgfsOutProc(endpoint.port(), endpoint.grid(), endpoint.igfs(), log, userName);
+
+                curDelegate = new Delegate(hadoop, hadoop.handshake(logDir));
+            }
+            catch (IOException | IgniteCheckedException e) {
+                if (e instanceof HadoopIgfsCommunicationException && hadoop != null)
+                    hadoop.close(true);
+
+                if (log.isDebugEnabled())
+                    log.debug("Failed to connect to IGFS using shared memory [port=" + endpoint.port() + ']', e);
+
+                errShmem = e;
+            }
+        }
+
+        // 4. Try local TCP connection.
+        boolean skipLocTcp = parameter(conf, PARAM_IGFS_ENDPOINT_NO_LOCAL_TCP, authority, false);
+
+        if (curDelegate == null && !skipLocTcp) {
+            HadoopIgfsEx hadoop = null;
+
+            try {
+                hadoop = new HadoopIgfsOutProc(LOCALHOST, endpoint.port(), endpoint.grid(), endpoint.igfs(),
+                    log, userName);
+
+                curDelegate = new Delegate(hadoop, hadoop.handshake(logDir));
+            }
+            catch (IOException | IgniteCheckedException e) {
+                if (e instanceof HadoopIgfsCommunicationException && hadoop != null)
+                    hadoop.close(true);
+
+                if (log.isDebugEnabled())
+                    log.debug("Failed to connect to IGFS using TCP [host=" + endpoint.host() +
+                        ", port=" + endpoint.port() + ']', e);
+
+                errTcp = e;
+            }
+        }
+
+        // 5. Try remote TCP connection.
+        if (curDelegate == null && (skipLocTcp || !F.eq(LOCALHOST, endpoint.host()))) {
+            HadoopIgfsEx hadoop = null;
+
+            try {
+                hadoop = new HadoopIgfsOutProc(endpoint.host(), endpoint.port(), endpoint.grid(), endpoint.igfs(),
+                    log, userName);
+
+                curDelegate = new Delegate(hadoop, hadoop.handshake(logDir));
+            }
+            catch (IOException | IgniteCheckedException e) {
+                if (e instanceof HadoopIgfsCommunicationException && hadoop != null)
+                    hadoop.close(true);
+
+                if (log.isDebugEnabled())
+                    log.debug("Failed to connect to IGFS using TCP [host=" + endpoint.host() +
+                        ", port=" + endpoint.port() + ']', e);
+
+                errTcp = e;
+            }
+        }
+
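+        // Publish the new delegate. If another thread published first, keep ours for this call only
+        // and mark it doomed so that it is closed after a single use.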
+        if (curDelegate != null) {
+            if (!delegateRef.compareAndSet(null, curDelegate))
+                curDelegate.doomed = true;
+
+            return curDelegate;
+        }
+        else {
+            SB errMsg = new SB("Failed to connect to IGFS [endpoint=igfs://" + authority + ", attempts=[");
+
+            if (errShmem != null)
+                errMsg.a("[type=SHMEM, port=" + endpoint.port() + ", err=" + errShmem + "], ");
+
+            errMsg.a("[type=TCP, host=" + endpoint.host() + ", port=" + endpoint.port() + ", err=" + errTcp + "]] ");
+
+            errMsg.a("(ensure that IGFS is running and have IPC endpoint enabled; ensure that " +
+                "ignite-shmem-1.0.0.jar is in Hadoop classpath if you use shared memory endpoint).");
+
+            throw new HadoopIgfsCommunicationException(errMsg.toString());
+        }
+    }
+
+    /**
+     * File system operation closure.
+     */
+    private interface FileSystemClosure<T> {
+        /**
+         * Call closure body.
+         *
+         * @param hadoop RPC handler.
+         * @param hndResp Handshake response.
+         * @return Result.
+         * @throws IgniteCheckedException If failed.
+         * @throws IOException If failed.
+         */
+        public T apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException;
+    }
+
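+    // Illustrative sketch only: each file system operation above wraps its IGFS call into a
+    // FileSystemClosure so that withReconnectHandling() can retry it on reconnect, e.g.
+    // (assuming a delete(path, recursive) operation exists on HadoopIgfsEx):
+    //
+    //   withReconnectHandling(new FileSystemClosure<Boolean>() {
+    //       @Override public Boolean apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp)
+    //           throws IgniteCheckedException, IOException {
+    //           return hadoop.delete(path, recursive);
+    //       }
+    //   }, path);
+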
+    /**
+     * Delegate.
+     */
+    private static class Delegate {
+        /** RPC handler. */
+        private final HadoopIgfsEx hadoop;
+
+        /** Handshake response. */
+        private final IgfsHandshakeResponse hndResp;
+
+        /** Close guard. */
+        private final AtomicBoolean closeGuard = new AtomicBoolean();
+
+        /** Whether this delegate must be closed at the end of the next invocation. */
+        private boolean doomed;
+
+        /**
+         * Constructor.
+         *
+         * @param hadoop Hadoop.
+         * @param hndResp Handshake response.
+         */
+        private Delegate(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) {
+            this.hadoop = hadoop;
+            this.hndResp = hndResp;
+        }
+
+        /**
+         * Close underlying RPC handler.
+         *
+         * @param force Force flag.
+         */
+        private void close(boolean force) {
+            if (closeGuard.compareAndSet(false, true))
+                hadoop.close(force);
+        }
+    }
+
+    /**
+     * Helper method to find the IGFS with the given name in the given Ignite instance.
+     *
+     * @param gridName The name of the grid to check.
+     * @param igfsName The name of the IGFS.
+     * @return The file system instance, or null if not found.
+     */
+    private static IgfsEx getIgfsEx(@Nullable String gridName, @Nullable String igfsName) {
+        if (Ignition.state(gridName) == STARTED) {
+            try {
+                for (IgniteFileSystem fs : Ignition.ignite(gridName).fileSystems()) {
+                    if (F.eq(fs.name(), igfsName))
+                        return (IgfsEx)fs;
+                }
+            }
+            catch (IgniteIllegalStateException ignore) {
+                // May happen if the grid state has changed.
+            }
+        }
+
+        return null;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobMetadata.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobMetadata.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobMetadata.java
new file mode 100644
index 0000000..090b336
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobMetadata.java
@@ -0,0 +1,316 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.jobtracker;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.Collection;
+import java.util.Map;
+import java.util.UUID;
+import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobPhase;
+import org.apache.ignite.internal.processors.hadoop.HadoopMapReducePlan;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCountersImpl;
+import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;
+import org.apache.ignite.internal.util.tostring.GridToStringExclude;
+import org.apache.ignite.internal.util.tostring.GridToStringInclude;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+import static org.apache.ignite.internal.processors.hadoop.HadoopJobPhase.PHASE_SETUP;
+
+/**
+ * Hadoop job metadata. Internal object used for distributed job state tracking.
+ */
+public class HadoopJobMetadata implements Externalizable {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** Job ID. */
+    private HadoopJobId jobId;
+
+    /** Job info. */
+    private HadoopJobInfo jobInfo;
+
+    /** ID of the node that submitted the job. */
+    private UUID submitNodeId;
+
+    /** Map-reduce plan. */
+    private HadoopMapReducePlan mrPlan;
+
+    /** Pending splits for which mappers should be executed. */
+    private Map<HadoopInputSplit, Integer> pendingSplits;
+
+    /** Pending reducers. */
+    private Collection<Integer> pendingReducers;
+
+    /** Reducers addresses. */
+    @GridToStringInclude
+    private Map<Integer, HadoopProcessDescriptor> reducersAddrs;
+
+    /** Job phase. */
+    private HadoopJobPhase phase = PHASE_SETUP;
+
+    /** Fail cause. */
+    @GridToStringExclude
+    private Throwable failCause;
+
+    /** Version. */
+    private long ver;
+
+    /** Job counters. */
+    private HadoopCounters counters = new HadoopCountersImpl();
+
+    /**
+     * Empty constructor required by {@link Externalizable}.
+     */
+    public HadoopJobMetadata() {
+        // No-op.
+    }
+
+    /**
+     * Constructor.
+     *
+     * @param submitNodeId Submit node ID.
+     * @param jobId Job ID.
+     * @param jobInfo Job info.
+     */
+    public HadoopJobMetadata(UUID submitNodeId, HadoopJobId jobId, HadoopJobInfo jobInfo) {
+        this.jobId = jobId;
+        this.jobInfo = jobInfo;
+        this.submitNodeId = submitNodeId;
+    }
+
+    /**
+     * Copy constructor.
+     *
+     * @param src Metadata to copy.
+     */
+    public HadoopJobMetadata(HadoopJobMetadata src) {
+        // Make sure to preserve alphabetic order.
+        counters = src.counters;
+        failCause = src.failCause;
+        jobId = src.jobId;
+        jobInfo = src.jobInfo;
+        mrPlan = src.mrPlan;
+        pendingSplits = src.pendingSplits;
+        pendingReducers = src.pendingReducers;
+        phase = src.phase;
+        reducersAddrs = src.reducersAddrs;
+        submitNodeId = src.submitNodeId;
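+        // Each copy represents an updated snapshot of the metadata, hence the version increment.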
+        ver = src.ver + 1;
+    }
+
+    /**
+     * @return Submit node ID.
+     */
+    public UUID submitNodeId() {
+        return submitNodeId;
+    }
+
+    /**
+     * @param phase Job phase.
+     */
+    public void phase(HadoopJobPhase phase) {
+        this.phase = phase;
+    }
+
+    /**
+     * @return Job phase.
+     */
+    public HadoopJobPhase phase() {
+        return phase;
+    }
+
+    /**
+     * Gets reducers addresses for external execution.
+     *
+     * @return Reducers addresses.
+     */
+    public Map<Integer, HadoopProcessDescriptor> reducersAddresses() {
+        return reducersAddrs;
+    }
+
+    /**
+     * Sets reducers addresses for external execution.
+     *
+     * @param reducersAddrs Map of addresses.
+     */
+    public void reducersAddresses(Map<Integer, HadoopProcessDescriptor> reducersAddrs) {
+        this.reducersAddrs = reducersAddrs;
+    }
+
+    /**
+     * Sets collection of pending splits.
+     *
+     * @param pendingSplits Collection of pending splits.
+     */
+    public void pendingSplits(Map<HadoopInputSplit, Integer> pendingSplits) {
+        this.pendingSplits = pendingSplits;
+    }
+
+    /**
+     * Gets collection of pending splits.
+     *
+     * @return Collection of pending splits.
+     */
+    public Map<HadoopInputSplit, Integer> pendingSplits() {
+        return pendingSplits;
+    }
+
+    /**
+     * Sets collection of pending reducers.
+     *
+     * @param pendingReducers Collection of pending reducers.
+     */
+    public void pendingReducers(Collection<Integer> pendingReducers) {
+        this.pendingReducers = pendingReducers;
+    }
+
+    /**
+     * Gets collection of pending reducers.
+     *
+     * @return Collection of pending reducers.
+     */
+    public Collection<Integer> pendingReducers() {
+        return pendingReducers;
+    }
+
+    /**
+     * @return Job ID.
+     */
+    public HadoopJobId jobId() {
+        return jobId;
+    }
+
+    /**
+     * @param mrPlan Map-reduce plan.
+     */
+    public void mapReducePlan(HadoopMapReducePlan mrPlan) {
+        assert this.mrPlan == null : "Map-reduce plan can only be initialized once.";
+
+        this.mrPlan = mrPlan;
+    }
+
+    /**
+     * @return Map-reduce plan.
+     */
+    public HadoopMapReducePlan mapReducePlan() {
+        return mrPlan;
+    }
+
+    /**
+     * @return Job info.
+     */
+    public HadoopJobInfo jobInfo() {
+        return jobInfo;
+    }
+
+    /**
+     * Returns job counters.
+     *
+     * @return Collection of counters.
+     */
+    public HadoopCounters counters() {
+        return counters;
+    }
+
+    /**
+     * Sets counters.
+     *
+     * @param counters Collection of counters.
+     */
+    public void counters(HadoopCounters counters) {
+        this.counters = counters;
+    }
+
+    /**
+     * @param failCause Fail cause.
+     */
+    public void failCause(Throwable failCause) {
+        assert failCause != null;
+
+        if (this.failCause == null) // Keep the first error.
+            this.failCause = failCause;
+    }
+
+    /**
+     * @return Fail cause.
+     */
+    public Throwable failCause() {
+        return failCause;
+    }
+
+    /**
+     * @return Version.
+     */
+    public long version() {
+        return ver;
+    }
+
+    /**
+     * @param split Split.
+     * @return Task number.
+     */
+    public int taskNumber(HadoopInputSplit split) {
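+        // Assumes the split is present in pendingSplits; an unknown split would cause an unboxing NPE.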
+        return pendingSplits.get(split);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeExternal(ObjectOutput out) throws IOException {
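+        // NOTE: the write order below must match the read order in readExternal().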
+        U.writeUuid(out, submitNodeId);
+        out.writeObject(jobId);
+        out.writeObject(jobInfo);
+        out.writeObject(mrPlan);
+        out.writeObject(pendingSplits);
+        out.writeObject(pendingReducers);
+        out.writeObject(phase);
+        out.writeObject(failCause);
+        out.writeLong(ver);
+        out.writeObject(reducersAddrs);
+        out.writeObject(counters);
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("unchecked")
+    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        submitNodeId = U.readUuid(in);
+        jobId = (HadoopJobId)in.readObject();
+        jobInfo = (HadoopJobInfo)in.readObject();
+        mrPlan = (HadoopMapReducePlan)in.readObject();
+        pendingSplits = (Map<HadoopInputSplit, Integer>)in.readObject();
+        pendingReducers = (Collection<Integer>)in.readObject();
+        phase = (HadoopJobPhase)in.readObject();
+        failCause = (Throwable)in.readObject();
+        ver = in.readLong();
+        reducersAddrs = (Map<Integer, HadoopProcessDescriptor>)in.readObject();
+        counters = (HadoopCounters)in.readObject();
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(HadoopJobMetadata.class, this, "pendingMaps", pendingSplits.size(),
+            "pendingReduces", pendingReducers.size(), "failCause", failCause == null ? null :
+                failCause.getClass().getName());
+    }
+}
\ No newline at end of file


[06/51] [abbrv] [partial] ignite git commit: IGNITE-3916: Created separate module.

Posted by vo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAbstractSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAbstractSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAbstractSelfTest.java
deleted file mode 100644
index bb155b4..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAbstractSelfTest.java
+++ /dev/null
@@ -1,321 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCache;
-import org.apache.ignite.cache.CacheWriteSynchronizationMode;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.configuration.FileSystemConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
-import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystem;
-import org.apache.ignite.internal.processors.igfs.IgfsBlockKey;
-import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
-import org.apache.ignite.internal.processors.igfs.IgfsEntryInfo;
-import org.apache.ignite.internal.processors.igfs.IgfsImpl;
-import org.apache.ignite.internal.processors.igfs.IgfsMetaManager;
-import org.apache.ignite.internal.util.typedef.G;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
-import org.apache.ignite.testframework.GridTestUtils;
-import org.jetbrains.annotations.Nullable;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.concurrent.Callable;
-
-import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
-import static org.apache.ignite.cache.CacheMode.PARTITIONED;
-import static org.apache.ignite.cache.CacheMode.REPLICATED;
-import static org.apache.ignite.igfs.IgfsMode.DUAL_ASYNC;
-import static org.apache.ignite.igfs.IgfsMode.DUAL_SYNC;
-import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
-import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH;
-import static org.apache.ignite.internal.processors.igfs.IgfsAbstractSelfTest.awaitFileClose;
-import static org.apache.ignite.internal.processors.igfs.IgfsAbstractSelfTest.clear;
-import static org.apache.ignite.internal.processors.igfs.IgfsAbstractSelfTest.create;
-
-/**
- * Tests for IGFS working in mode when remote file system exists: DUAL_SYNC, DUAL_ASYNC.
- */
-public abstract class HadoopIgfsDualAbstractSelfTest extends IgfsCommonAbstractTest {
-    /** IGFS block size. */
-    protected static final int IGFS_BLOCK_SIZE = 512 * 1024;
-
-    /** Amount of blocks to prefetch. */
-    protected static final int PREFETCH_BLOCKS = 1;
-
-    /** Amount of sequential block reads before prefetch is triggered. */
-    protected static final int SEQ_READS_BEFORE_PREFETCH = 2;
-
-    /** Secondary file system URI. */
-    protected static final String SECONDARY_URI = "igfs://igfs-secondary:grid-secondary@127.0.0.1:11500/";
-
-    /** Secondary file system configuration path. */
-    protected static final String SECONDARY_CFG = "modules/core/src/test/config/hadoop/core-site-loopback-secondary.xml";
-
-    /** Primary file system URI. */
-    protected static final String PRIMARY_URI = "igfs://igfs:grid@/";
-
-    /** Primary file system configuration path. */
-    protected static final String PRIMARY_CFG = "modules/core/src/test/config/hadoop/core-site-loopback.xml";
-
-    /** Primary file system REST endpoint configuration map. */
-    protected static final IgfsIpcEndpointConfiguration PRIMARY_REST_CFG;
-
-    /** Secondary file system REST endpoint configuration map. */
-    protected static final IgfsIpcEndpointConfiguration SECONDARY_REST_CFG;
-
-    /** Directory. */
-    protected static final IgfsPath DIR = new IgfsPath("/dir");
-
-    /** Sub-directory. */
-    protected static final IgfsPath SUBDIR = new IgfsPath(DIR, "subdir");
-
-    /** File. */
-    protected static final IgfsPath FILE = new IgfsPath(SUBDIR, "file");
-
-    /** Default data chunk (128 bytes). */
-    protected static byte[] chunk;
-
-    /** Primary IGFS. */
-    protected static IgfsImpl igfs;
-
-    /** Secondary IGFS. */
-    protected static IgfsImpl igfsSecondary;
-
-    /** IGFS mode. */
-    protected final IgfsMode mode;
-
-    static {
-        PRIMARY_REST_CFG = new IgfsIpcEndpointConfiguration();
-
-        PRIMARY_REST_CFG.setType(IgfsIpcEndpointType.TCP);
-        PRIMARY_REST_CFG.setPort(10500);
-
-        SECONDARY_REST_CFG = new IgfsIpcEndpointConfiguration();
-
-        SECONDARY_REST_CFG.setType(IgfsIpcEndpointType.TCP);
-        SECONDARY_REST_CFG.setPort(11500);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param mode IGFS mode.
-     */
-    protected HadoopIgfsDualAbstractSelfTest(IgfsMode mode) {
-        this.mode = mode;
-        assert mode == DUAL_SYNC || mode == DUAL_ASYNC;
-    }
-
-    /**
-     * Start grid with IGFS.
-     *
-     * @param gridName Grid name.
-     * @param igfsName IGFS name.
-     * @param mode IGFS mode.
-     * @param secondaryFs Secondary file system (optional).
-     * @param restCfg Rest configuration string (optional).
-     * @return Started grid instance.
-     * @throws Exception If failed.
-     */
-    protected Ignite startGridWithIgfs(String gridName, String igfsName, IgfsMode mode,
-        @Nullable IgfsSecondaryFileSystem secondaryFs, @Nullable IgfsIpcEndpointConfiguration restCfg) throws Exception {
-        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
-
-        igfsCfg.setDataCacheName("dataCache");
-        igfsCfg.setMetaCacheName("metaCache");
-        igfsCfg.setName(igfsName);
-        igfsCfg.setBlockSize(IGFS_BLOCK_SIZE);
-        igfsCfg.setDefaultMode(mode);
-        igfsCfg.setIpcEndpointConfiguration(restCfg);
-        igfsCfg.setSecondaryFileSystem(secondaryFs);
-        igfsCfg.setPrefetchBlocks(PREFETCH_BLOCKS);
-        igfsCfg.setSequentialReadsBeforePrefetch(SEQ_READS_BEFORE_PREFETCH);
-
-        CacheConfiguration dataCacheCfg = defaultCacheConfiguration();
-
-        dataCacheCfg.setName("dataCache");
-        dataCacheCfg.setCacheMode(PARTITIONED);
-        dataCacheCfg.setNearConfiguration(null);
-        dataCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        dataCacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(2));
-        dataCacheCfg.setBackups(0);
-        dataCacheCfg.setAtomicityMode(TRANSACTIONAL);
-        dataCacheCfg.setOffHeapMaxMemory(0);
-
-        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
-
-        metaCacheCfg.setName("metaCache");
-        metaCacheCfg.setCacheMode(REPLICATED);
-        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        IgniteConfiguration cfg = new IgniteConfiguration();
-
-        cfg.setGridName(gridName);
-
-        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
-
-        discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
-
-        cfg.setDiscoverySpi(discoSpi);
-        cfg.setCacheConfiguration(dataCacheCfg, metaCacheCfg);
-        cfg.setFileSystemConfiguration(igfsCfg);
-
-        cfg.setLocalHost("127.0.0.1");
-        cfg.setConnectorConfiguration(null);
-
-        return G.start(cfg);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTestsStarted() throws Exception {
-        chunk = new byte[128];
-
-        for (int i = 0; i < chunk.length; i++)
-            chunk[i] = (byte)i;
-
-        Ignite igniteSecondary = startGridWithIgfs("grid-secondary", "igfs-secondary", PRIMARY, null, SECONDARY_REST_CFG);
-
-        IgfsSecondaryFileSystem hadoopFs = new IgniteHadoopIgfsSecondaryFileSystem(SECONDARY_URI, SECONDARY_CFG);
-
-        Ignite ignite = startGridWithIgfs("grid", "igfs", mode, hadoopFs, PRIMARY_REST_CFG);
-
-        igfsSecondary = (IgfsImpl) igniteSecondary.fileSystem("igfs-secondary");
-        igfs = (IgfsImpl) ignite.fileSystem("igfs");
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTest() throws Exception {
-        clear(igfs);
-        clear(igfsSecondary);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTestsStopped() throws Exception {
-        G.stopAll(true);
-    }
-
-    /**
-     * Convenient method to group paths.
-     *
-     * @param paths Paths to group.
-     * @return Paths as array.
-     */
-    protected IgfsPath[] paths(IgfsPath... paths) {
-        return paths;
-    }
-
-    /**
-     * Check how prefetch override works.
-     *
-     * @throws Exception If failed.
-     */
-    public void testOpenPrefetchOverride() throws Exception {
-        create(igfsSecondary, paths(DIR, SUBDIR), paths(FILE));
-
-        // Write enough data to the secondary file system.
-        final int blockSize = IGFS_BLOCK_SIZE;
-
-        IgfsOutputStream out = igfsSecondary.append(FILE, false);
-
-        int totalWritten = 0;
-
-        while (totalWritten < blockSize * 2 + chunk.length) {
-            out.write(chunk);
-
-            totalWritten += chunk.length;
-        }
-
-        out.close();
-
-        awaitFileClose(igfsSecondary, FILE);
-
-        // Instantiate file system with overridden "seq reads before prefetch" property.
-        Configuration cfg = new Configuration();
-
-        cfg.addResource(U.resolveIgniteUrl(PRIMARY_CFG));
-
-        int seqReads = SEQ_READS_BEFORE_PREFETCH + 1;
-
-        cfg.setInt(String.format(PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, "igfs:grid@"), seqReads);
-
-        FileSystem fs = FileSystem.get(new URI(PRIMARY_URI), cfg);
-
-        // Read the first two blocks.
-        Path fsHome = new Path(PRIMARY_URI);
-        Path dir = new Path(fsHome, DIR.name());
-        Path subdir = new Path(dir, SUBDIR.name());
-        Path file = new Path(subdir, FILE.name());
-
-        FSDataInputStream fsIn = fs.open(file);
-
-        final byte[] readBuf = new byte[blockSize * 2];
-
-        fsIn.readFully(0, readBuf, 0, readBuf.length);
-
-        // Wait for a while for prefetch to finish (if any).
-        IgfsMetaManager meta = igfs.context().meta();
-
-        IgfsEntryInfo info = meta.info(meta.fileId(FILE));
-
-        IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), 2);
-
-        IgniteCache<IgfsBlockKey, byte[]> dataCache = igfs.context().kernalContext().cache().jcache(
-            igfs.configuration().getDataCacheName());
-
-        for (int i = 0; i < 10; i++) {
-            if (dataCache.containsKey(key))
-                break;
-            else
-                U.sleep(100);
-        }
-
-        fsIn.close();
-
-        // Remove the file from the secondary file system.
-        igfsSecondary.delete(FILE, false);
-
-        // Try reading the third block. Should fail.
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                IgfsInputStream in0 = igfs.open(FILE);
-
-                in0.seek(blockSize * 2);
-
-                try {
-                    in0.read(readBuf);
-                }
-                finally {
-                    U.closeQuiet(in0);
-                }
-
-                return null;
-            }
-        }, IOException.class,
-            "Failed to read data due to secondary file system exception: /dir/subdir/file");
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAsyncSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAsyncSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAsyncSelfTest.java
deleted file mode 100644
index 6c6e709..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAsyncSelfTest.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.igfs.IgfsMode.DUAL_ASYNC;
-
-/**
- * Tests for DUAL_ASYNC mode.
- */
-public class HadoopIgfsDualAsyncSelfTest extends HadoopIgfsDualAbstractSelfTest {
-    /**
-     * Constructor.
-     */
-    public HadoopIgfsDualAsyncSelfTest() {
-        super(DUAL_ASYNC);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualSyncSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualSyncSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualSyncSelfTest.java
deleted file mode 100644
index 96a63d5..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualSyncSelfTest.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import static org.apache.ignite.igfs.IgfsMode.DUAL_SYNC;
-
-/**
- * Tests for DUAL_SYNC mode.
- */
-public class HadoopIgfsDualSyncSelfTest extends HadoopIgfsDualAbstractSelfTest {
-    /**
-     * Constructor.
-     */
-    public HadoopIgfsDualSyncSelfTest() {
-        super(DUAL_SYNC);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsSecondaryFileSystemTestAdapter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsSecondaryFileSystemTestAdapter.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsSecondaryFileSystemTestAdapter.java
deleted file mode 100644
index f7af6f0..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsSecondaryFileSystemTestAdapter.java
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.HashMap;
-import java.util.Map;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.ignite.configuration.FileSystemConfiguration;
-import org.apache.ignite.hadoop.fs.HadoopFileSystemFactory;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils;
-import org.apache.ignite.internal.processors.igfs.IgfsEx;
-import org.apache.ignite.internal.processors.igfs.IgfsUtils;
-import org.apache.ignite.internal.processors.igfs.IgfsSecondaryFileSystemTestAdapter;
-import org.apache.ignite.internal.util.typedef.T2;
-
-/**
- * Universal adapter wrapping {@link org.apache.hadoop.fs.FileSystem} instance.
- */
-public class HadoopIgfsSecondaryFileSystemTestAdapter implements IgfsSecondaryFileSystemTestAdapter {
-    /** File system factory. */
-    private final HadoopFileSystemFactory factory;
-
-    /**
-     * Constructor.
-     * @param factory File system factory.
-     */
-    public HadoopIgfsSecondaryFileSystemTestAdapter(HadoopFileSystemFactory factory) {
-        assert factory != null;
-
-        this.factory = factory;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String name() throws IOException {
-        return get().getUri().toString();
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean exists(String path) throws IOException {
-        return get().exists(new Path(path));
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean delete(String path, boolean recursive) throws IOException {
-        return get().delete(new Path(path), recursive);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void mkdirs(String path) throws IOException {
-        boolean ok = get().mkdirs(new Path(path));
-        if (!ok)
-            throw new IOException("Failed to mkdirs: " + path);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void format() throws IOException {
-        HadoopIgfsUtils.clear(get());
-    }
-
-    /** {@inheritDoc} */
-    @Override public Map<String, String> properties(String path) throws IOException {
-        Path p = new Path(path);
-
-        FileStatus status = get().getFileStatus(p);
-
-        Map<String,String> m = new HashMap<>(3);
-
-        m.put(IgfsUtils.PROP_USER_NAME, status.getOwner());
-        m.put(IgfsUtils.PROP_GROUP_NAME, status.getGroup());
-        m.put(IgfsUtils.PROP_PERMISSION, permission(status));
-
-        return m;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String permissions(String path) throws IOException {
-        return permission(get().getFileStatus(new Path(path)));
-    }
-
-    /**
-     * Get permission for file status.
-     *
-     * @param status Status.
-     * @return Permission.
-     */
-    private String permission(FileStatus status) {
-        FsPermission perm = status.getPermission();
-
-        return "0" + perm.getUserAction().ordinal() + perm.getGroupAction().ordinal() + perm.getOtherAction().ordinal();
-    }
-
-    /** {@inheritDoc} */
-    @Override public InputStream openInputStream(String path) throws IOException {
-        return get().open(new Path(path));
-    }
-
-    /** {@inheritDoc} */
-    @Override public OutputStream openOutputStream(String path, boolean append) throws IOException {
-        Path p = new Path(path);
-
-        if (append)
-            return get().append(p);
-        else
-            return get().create(p, true/*overwrite*/);
-    }
-
-    /** {@inheritDoc} */
-    @Override public T2<Long, Long> times(String path) throws IOException {
-        FileStatus status = get().getFileStatus(new Path(path));
-
-        return new T2<>(status.getAccessTime(), status.getModificationTime());
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsEx igfs() {
-        return null;
-    }
-
-    /**
-     * Create file system.
-     *
-     * @return File system.
-     * @throws IOException If failed.
-     */
-    protected FileSystem get() throws IOException {
-        return factory.get(FileSystemConfiguration.DFLT_USER_NAME);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopSecondaryFileSystemConfigurationTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopSecondaryFileSystemConfigurationTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopSecondaryFileSystemConfigurationTest.java
deleted file mode 100644
index d9b5d66..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopSecondaryFileSystemConfigurationTest.java
+++ /dev/null
@@ -1,575 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.cache.CacheWriteSynchronizationMode;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.configuration.FileSystemConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.hadoop.fs.CachingHadoopFileSystemFactory;
-import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
-import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem;
-import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils;
-import org.apache.ignite.internal.processors.igfs.IgfsCommonAbstractTest;
-import org.apache.ignite.internal.util.typedef.G;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.spi.communication.CommunicationSpi;
-import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
-import org.apache.ignite.testframework.GridTestUtils;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.net.URI;
-import java.util.concurrent.Callable;
-
-import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
-import static org.apache.ignite.cache.CacheMode.PARTITIONED;
-import static org.apache.ignite.cache.CacheMode.REPLICATED;
-import static org.apache.ignite.events.EventType.EVT_JOB_MAPPED;
-import static org.apache.ignite.events.EventType.EVT_TASK_FAILED;
-import static org.apache.ignite.events.EventType.EVT_TASK_FINISHED;
-import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
-import static org.apache.ignite.igfs.IgfsMode.PROXY;
-import static org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT;
-
-/**
- * Tests secondary file system configuration.
- */
-public class HadoopSecondaryFileSystemConfigurationTest extends IgfsCommonAbstractTest {
-    /** IGFS scheme */
-    static final String IGFS_SCHEME = "igfs";
-
-    /** Primary file system authority. */
-    private static final String PRIMARY_AUTHORITY = "igfs:grid0@";
-
-    /** Autogenerated secondary file system configuration path. */
-    private static final String PRIMARY_CFG_PATH = "/work/core-site-primary-test.xml";
-
-    /** Secondary file system authority. */
-    private static final String SECONDARY_AUTHORITY = "igfs_secondary:grid_secondary@127.0.0.1:11500";
-
-    /** Autogenerated secondary file system configuration path. */
-    static final String SECONDARY_CFG_PATH = "/work/core-site-test.xml";
-
-    /** Secondary endpoint configuration. */
-    protected static final IgfsIpcEndpointConfiguration SECONDARY_ENDPOINT_CFG;
-
-    /** Group size. */
-    public static final int GRP_SIZE = 128;
-
-    /** IP finder. */
-    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
-
-    /** Primary file system URI. */
-    protected URI primaryFsUri;
-
-    /** Primary file system. */
-    private FileSystem primaryFs;
-
-    /** Full path of primary Fs configuration. */
-    private String primaryConfFullPath;
-
-    /** Input primary Fs uri. */
-    private String primaryFsUriStr;
-
-    /** Input URI scheme for configuration. */
-    private String primaryCfgScheme;
-
-    /** Input URI authority for configuration. */
-    private String primaryCfgAuthority;
-
-    /** Whether to pass primary configuration. */
-    private boolean passPrimaryConfiguration;
-
-    /** Full path of secondary Fs configuration. */
-    private String secondaryConfFullPath;
-
-    /** Input secondary Fs uri. */
-    private String secondaryFsUriStr;
-
-    /** Input URI scheme for configuration. */
-    private String secondaryCfgScheme;
-
-    /** Input URI authority for configuration. */
-    private String secondaryCfgAuthority;
-
-    /** Whether to pass secondary configuration. */
-    private boolean passSecondaryConfiguration;
-
-    /** Default IGFS mode. */
-    protected final IgfsMode mode;
-
-    /** Skip embedded mode flag. */
-    private final boolean skipEmbed;
-
-    /** Skip local shmem flag. */
-    private final boolean skipLocShmem;
-
-    static {
-        SECONDARY_ENDPOINT_CFG = new IgfsIpcEndpointConfiguration();
-
-        SECONDARY_ENDPOINT_CFG.setType(IgfsIpcEndpointType.TCP);
-        SECONDARY_ENDPOINT_CFG.setPort(11500);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param mode Default IGFS mode.
-     * @param skipEmbed Whether to skip embedded mode.
-     * @param skipLocShmem Whether to skip local shmem mode.
-     */
-    protected HadoopSecondaryFileSystemConfigurationTest(IgfsMode mode, boolean skipEmbed, boolean skipLocShmem) {
-        this.mode = mode;
-        this.skipEmbed = skipEmbed;
-        this.skipLocShmem = skipLocShmem;
-    }
-
-    /**
-     * Default constructor.
-     */
-    public HadoopSecondaryFileSystemConfigurationTest() {
-        this(PROXY, true, false);
-    }
-
-    /**
-     * Executes before each test.
-     * @throws Exception If failed.
-     */
-    private void before() throws Exception {
-        initSecondary();
-
-        if (passPrimaryConfiguration) {
-            Configuration primaryFsCfg = configuration(primaryCfgScheme, primaryCfgAuthority, skipEmbed, skipLocShmem);
-
-            primaryConfFullPath = writeConfiguration(primaryFsCfg, PRIMARY_CFG_PATH);
-        }
-        else
-            primaryConfFullPath = null;
-
-        CachingHadoopFileSystemFactory fac = new CachingHadoopFileSystemFactory();
-
-        fac.setConfigPaths(primaryConfFullPath);
-        fac.setUri(primaryFsUriStr);
-
-        fac.start();
-
-        primaryFs = fac.get(null); //provider.createFileSystem(null);
-
-        primaryFsUri = primaryFs.getUri();
-    }
-
-    /**
-     * Executes after each test.
-     * @throws Exception If failed.
-     */
-    private void after() throws Exception {
-        if (primaryFs != null) {
-            try {
-                primaryFs.delete(new Path("/"), true);
-            }
-            catch (Exception ignore) {
-                // No-op.
-            }
-
-            U.closeQuiet(primaryFs);
-        }
-
-        G.stopAll(true);
-
-        delete(primaryConfFullPath);
-        delete(secondaryConfFullPath);
-    }
-
-    /**
-     * Utility method to delete file.
-     *
-     * @param file the file path to delete.
-     */
-    @SuppressWarnings("ResultOfMethodCallIgnored")
-    private static void delete(String file) {
-        if (file != null) {
-            new File(file).delete();
-
-            assertFalse(new File(file).exists());
-        }
-    }
-
-    /**
-     * Initialize underlying secondary file system.
-     *
-     * @throws Exception If failed.
-     */
-    private void initSecondary() throws Exception {
-        if (passSecondaryConfiguration) {
-            Configuration secondaryConf = configuration(secondaryCfgScheme, secondaryCfgAuthority, true, true);
-
-            secondaryConf.setInt("fs.igfs.block.size", 1024);
-
-            secondaryConfFullPath = writeConfiguration(secondaryConf, SECONDARY_CFG_PATH);
-        }
-        else
-            secondaryConfFullPath = null;
-
-        startNodes();
-    }
-
-    /**
-     * Starts the nodes for this test.
-     *
-     * @throws Exception If failed.
-     */
-    private void startNodes() throws Exception {
-        if (mode != PRIMARY)
-            startSecondary();
-
-        startGrids(4);
-    }
-
-    /**
-     * Starts secondary IGFS.
-     */
-    private void startSecondary() {
-        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
-
-        igfsCfg.setDataCacheName("partitioned");
-        igfsCfg.setMetaCacheName("replicated");
-        igfsCfg.setName("igfs_secondary");
-        igfsCfg.setIpcEndpointConfiguration(SECONDARY_ENDPOINT_CFG);
-        igfsCfg.setBlockSize(512 * 1024);
-        igfsCfg.setPrefetchBlocks(1);
-
-        CacheConfiguration cacheCfg = defaultCacheConfiguration();
-
-        cacheCfg.setName("partitioned");
-        cacheCfg.setCacheMode(PARTITIONED);
-        cacheCfg.setNearConfiguration(null);
-        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(GRP_SIZE));
-        cacheCfg.setBackups(0);
-        cacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
-
-        metaCacheCfg.setName("replicated");
-        metaCacheCfg.setCacheMode(REPLICATED);
-        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        IgniteConfiguration cfg = new IgniteConfiguration();
-
-        cfg.setGridName("grid_secondary");
-
-        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
-
-        discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
-
-        cfg.setDiscoverySpi(discoSpi);
-        cfg.setCacheConfiguration(metaCacheCfg, cacheCfg);
-        cfg.setFileSystemConfiguration(igfsCfg);
-        cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED);
-
-        cfg.setCommunicationSpi(communicationSpi());
-
-        G.start(cfg);
-    }
-
-    /**
-     * Get primary IPC endpoint configuration.
-     *
-     * @param gridName Grid name.
-     * @return IPC primary endpoint configuration.
-     */
-    protected IgfsIpcEndpointConfiguration primaryIpcEndpointConfiguration(final String gridName) {
-        IgfsIpcEndpointConfiguration cfg = new IgfsIpcEndpointConfiguration();
-
-        cfg.setType(IgfsIpcEndpointType.TCP);
-        cfg.setPort(DFLT_IPC_PORT + getTestGridIndex(gridName));
-
-        return cfg;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getTestGridName() {
-        return "grid";
-    }
-
-    /** {@inheritDoc} */
-    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
-        IgniteConfiguration cfg = super.getConfiguration(gridName);
-
-        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
-
-        discoSpi.setIpFinder(IP_FINDER);
-
-        cfg.setDiscoverySpi(discoSpi);
-        cfg.setCacheConfiguration(cacheConfiguration());
-        cfg.setFileSystemConfiguration(fsConfiguration(gridName));
-        cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED);
-        cfg.setCommunicationSpi(communicationSpi());
-
-        return cfg;
-    }
-
-    /**
-     * Gets cache configuration.
-     *
-     * @return Cache configuration.
-     */
-    protected CacheConfiguration[] cacheConfiguration() {
-        CacheConfiguration cacheCfg = defaultCacheConfiguration();
-
-        cacheCfg.setName("partitioned");
-        cacheCfg.setCacheMode(PARTITIONED);
-        cacheCfg.setNearConfiguration(null);
-        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(GRP_SIZE));
-        cacheCfg.setBackups(0);
-        cacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
-
-        metaCacheCfg.setName("replicated");
-        metaCacheCfg.setCacheMode(REPLICATED);
-        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        return new CacheConfiguration[] {metaCacheCfg, cacheCfg};
-    }
-
-    /**
-     * Gets IGFS configuration.
-     *
-     * @param gridName Grid name.
-     * @return IGFS configuration.
-     */
-    protected FileSystemConfiguration fsConfiguration(String gridName) throws IgniteCheckedException {
-        FileSystemConfiguration cfg = new FileSystemConfiguration();
-
-        cfg.setDataCacheName("partitioned");
-        cfg.setMetaCacheName("replicated");
-        cfg.setName("igfs");
-        cfg.setPrefetchBlocks(1);
-        cfg.setDefaultMode(mode);
-
-        if (mode != PRIMARY)
-            cfg.setSecondaryFileSystem(
-                new IgniteHadoopIgfsSecondaryFileSystem(secondaryFsUriStr, secondaryConfFullPath));
-
-        cfg.setIpcEndpointConfiguration(primaryIpcEndpointConfiguration(gridName));
-
-        cfg.setManagementPort(-1);
-        cfg.setBlockSize(512 * 1024); // Together with group blocks mapper will yield 64M per node groups.
-
-        return cfg;
-    }
-
-    /** @return Communication SPI. */
-    private CommunicationSpi communicationSpi() {
-        TcpCommunicationSpi commSpi = new TcpCommunicationSpi();
-
-        commSpi.setSharedMemoryPort(-1);
-
-        return commSpi;
-    }
-
-    /**
-     * Case #SecondaryFileSystemProvider(null, path)
-     *
-     * @throws Exception On failure.
-     */
-    public void testFsConfigurationOnly() throws Exception {
-        primaryCfgScheme = IGFS_SCHEME;
-        primaryCfgAuthority = PRIMARY_AUTHORITY;
-        passPrimaryConfiguration = true;
-        primaryFsUriStr = null;
-
-        // wrong secondary URI in the configuration:
-        secondaryCfgScheme = IGFS_SCHEME;
-        secondaryCfgAuthority = SECONDARY_AUTHORITY;
-        passSecondaryConfiguration = true;
-        secondaryFsUriStr = null;
-
-        check();
-    }
-
-    /**
-     * Case #SecondaryFileSystemProvider(uri, path), when 'uri' parameter overrides
-     * the Fs uri set in the configuration.
-     *
-     * @throws Exception On failure.
-     */
-    public void testFsUriOverridesUriInConfiguration() throws Exception {
-        // wrong primary URI in the configuration:
-        primaryCfgScheme = "foo";
-        primaryCfgAuthority = "moo:zoo@bee";
-        passPrimaryConfiguration = true;
-        primaryFsUriStr = mkUri(IGFS_SCHEME, PRIMARY_AUTHORITY);
-
-        // wrong secondary URI in the configuration:
-        secondaryCfgScheme = "foo";
-        secondaryCfgAuthority = "moo:zoo@bee";
-        passSecondaryConfiguration = true;
-        secondaryFsUriStr = mkUri(IGFS_SCHEME, SECONDARY_AUTHORITY);
-
-        check();
-    }
-
-    /**
-     * Perform actual check.
-     *
-     * @throws Exception If failed.
-     */
-    @SuppressWarnings("deprecation")
-    private void check() throws Exception {
-        before();
-
-        try {
-            Path fsHome = new Path(primaryFsUri);
-            Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
-            Path file = new Path(dir, "someFile");
-
-            assertPathDoesNotExist(primaryFs, file);
-
-            FsPermission fsPerm = new FsPermission((short)644);
-
-            FSDataOutputStream os = primaryFs.create(file, fsPerm, false, 1, (short)1, 1L, null);
-
-            // Try to write something in file.
-            os.write("abc".getBytes());
-
-            os.close();
-
-            // Check file status.
-            FileStatus fileStatus = primaryFs.getFileStatus(file);
-
-            assertFalse(fileStatus.isDir());
-            assertEquals(file, fileStatus.getPath());
-            assertEquals(fsPerm, fileStatus.getPermission());
-        }
-        finally {
-            after();
-        }
-    }
-
-    /**
-     * Creates configuration for test.
-     *
-     * @param scheme File system scheme.
-     * @param authority File system authority.
-     * @param skipEmbed Whether to skip embedded mode.
-     * @param skipLocShmem Whether to skip local shmem mode.
-     * @return Configuration.
-     */
-    static Configuration configuration(String scheme, String authority, boolean skipEmbed, boolean skipLocShmem) {
-        final Configuration cfg = new Configuration();
-
-        if (scheme != null && authority != null)
-            cfg.set("fs.defaultFS", scheme + "://" + authority + "/");
-
-        setImplClasses(cfg);
-
-        if (authority != null) {
-            if (skipEmbed)
-                cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_EMBED, authority), true);
-
-            if (skipLocShmem)
-                cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM, authority), true);
-        }
-
-        return cfg;
-    }
-
-    /**
-     * Sets Hadoop Fs implementation classes.
-     *
-     * @param cfg the configuration to set parameters into.
-     */
-    static void setImplClasses(Configuration cfg) {
-        cfg.set("fs.igfs.impl", IgniteHadoopFileSystem.class.getName());
-
-        cfg.set("fs.AbstractFileSystem.igfs.impl",
-            org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem.class.getName());
-    }
-
-    /**
-     * Check path does not exist in a given FileSystem.
-     *
-     * @param fs FileSystem to check.
-     * @param path Path to check.
-     */
-    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
-    private void assertPathDoesNotExist(final FileSystem fs, final Path path) {
-        GridTestUtils.assertThrows(log, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                return fs.getFileStatus(path);
-            }
-        }, FileNotFoundException.class, null);
-    }
-
-    /**
-     * Writes the configuration to local disk and returns its path.
-     *
-     * @param cfg Configuration to write.
-     * @param pathFromIgniteHome Path relative to Ignite home.
-     * @return Full path of the written configuration.
-     * @throws IOException If write failed.
-     */
-    static String writeConfiguration(Configuration cfg, String pathFromIgniteHome) throws IOException {
-        if (!pathFromIgniteHome.startsWith("/"))
-            pathFromIgniteHome = "/" + pathFromIgniteHome;
-
-        final String path = U.getIgniteHome() + pathFromIgniteHome;
-
-        delete(path);
-
-        File file = new File(path);
-
-        try (FileOutputStream fos = new FileOutputStream(file)) {
-            cfg.writeXml(fos);
-        }
-
-        assertTrue(file.exists());
-        return path;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected long getTestTimeout() {
-        return 3 * 60 * 1000;
-    }
-
-    /**
-     * Makes URI.
-     *
-     * @param scheme Scheme.
-     * @param authority Authority.
-     * @return URI string.
-     */
-    static String mkUri(String scheme, String authority) {
-        return scheme + "://" + authority + "/";
-    }
-}
\ No newline at end of file

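As a side note on the helpers above: configuration() keys the endpoint flags by URI authority, so a single Hadoop Configuration can address several IGFS endpoints with different transport settings. A minimal, hypothetical sketch of a client using the same properties (the authority value and implementation class name below are assumptions based on the test code, not a documented recipe):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class IgfsClientSketch {
        public static void main(String[] args) throws Exception {
            String authority = "igfs:grid@127.0.0.1:10500"; // Hypothetical endpoint authority.

            Configuration cfg = new Configuration();

            // Same properties the configuration(...) helper sets above.
            cfg.set("fs.defaultFS", "igfs://" + authority + "/");
            cfg.set("fs.igfs.impl", "org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem");
            cfg.setBoolean(String.format("fs.igfs.%s.endpoint.no_embed", authority), true);

            try (FileSystem fs = FileSystem.get(new URI("igfs://" + authority + "/"), cfg)) {
                System.out.println("Connected to " + fs.getUri());
            }
        }
    }
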
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsEventsTestSuite.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsEventsTestSuite.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsEventsTestSuite.java
deleted file mode 100644
index a9d7bad..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsEventsTestSuite.java
+++ /dev/null
@@ -1,285 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import junit.framework.TestSuite;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteFileSystem;
-import org.apache.ignite.configuration.FileSystemConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
-import org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint;
-import org.apache.ignite.internal.util.typedef.G;
-import org.jetbrains.annotations.Nullable;
-
-import static org.apache.ignite.igfs.IgfsMode.DUAL_ASYNC;
-import static org.apache.ignite.igfs.IgfsMode.DUAL_SYNC;
-import static org.apache.ignite.igfs.IgfsMode.PRIMARY;
-
-/**
- * Test suite for IGFS event tests.
- */
-@SuppressWarnings("PublicInnerClass")
-public class IgfsEventsTestSuite extends TestSuite {
-    /**
-     * @return Test suite.
-     * @throws Exception Thrown in case of failure.
-     */
-    public static TestSuite suite() throws Exception {
-        ClassLoader ldr = TestSuite.class.getClassLoader();
-
-        TestSuite suite = new TestSuite("Ignite FS Events Test Suite");
-
-        suite.addTest(new TestSuite(ldr.loadClass(ShmemPrimary.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(ShmemDualSync.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(ShmemDualAsync.class.getName())));
-
-        suite.addTest(new TestSuite(ldr.loadClass(LoopbackPrimary.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(LoopbackDualSync.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(LoopbackDualAsync.class.getName())));
-
-        return suite;
-    }
-
-    /**
-     * @return Test suite with only tests that are supported on all platforms.
-     * @throws Exception Thrown in case of failure.
-     */
-    public static TestSuite suiteNoarchOnly() throws Exception {
-        ClassLoader ldr = TestSuite.class.getClassLoader();
-
-        TestSuite suite = new TestSuite("Ignite IGFS Events Test Suite Noarch Only");
-
-        suite.addTest(new TestSuite(ldr.loadClass(LoopbackPrimary.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(LoopbackDualSync.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(LoopbackDualAsync.class.getName())));
-
-        return suite;
-    }
-
-    /**
-     * Shared memory IPC in PRIMARY mode.
-     */
-    public static class ShmemPrimary extends IgfsEventsAbstractSelfTest {
-        /** {@inheritDoc} */
-        @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException {
-            FileSystemConfiguration igfsCfg = super.getIgfsConfiguration();
-
-            igfsCfg.setDefaultMode(IgfsMode.PRIMARY);
-
-            IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
-
-            endpointCfg.setType(IgfsIpcEndpointType.SHMEM);
-            endpointCfg.setPort(IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + 1);
-
-            igfsCfg.setIpcEndpointConfiguration(endpointCfg);
-
-            return igfsCfg;
-        }
-    }
-
-    /**
-     * Loopback socket IPC in PRIMARY mode.
-     */
-    public static class LoopbackPrimary extends IgfsEventsAbstractSelfTest {
-        /** {@inheritDoc} */
-        @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException {
-            FileSystemConfiguration igfsCfg = super.getIgfsConfiguration();
-
-            igfsCfg.setDefaultMode(IgfsMode.PRIMARY);
-
-            IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
-
-            endpointCfg.setType(IgfsIpcEndpointType.TCP);
-            endpointCfg.setPort(IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + 1);
-
-            igfsCfg.setIpcEndpointConfiguration(endpointCfg);
-
-            return igfsCfg;
-        }
-    }
-
-    /**
-     * Base class for all IGFS tests with primary and secondary file system.
-     */
-    public abstract static class PrimarySecondaryTest extends IgfsEventsAbstractSelfTest {
-        /** Secondary file system. */
-        private static IgniteFileSystem igfsSec;
-
-        /** {@inheritDoc} */
-        @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException {
-            FileSystemConfiguration igfsCfg = super.getIgfsConfiguration();
-
-            igfsCfg.setSecondaryFileSystem(new IgniteHadoopIgfsSecondaryFileSystem(
-                "igfs://igfs-secondary:grid-secondary@127.0.0.1:11500/",
-                "modules/core/src/test/config/hadoop/core-site-secondary.xml"));
-
-            return igfsCfg;
-        }
-
-        /**
-         * @return IGFS configuration for secondary file system.
-         * @throws IgniteCheckedException If failed.
-         */
-        protected FileSystemConfiguration getSecondaryIgfsConfiguration() throws IgniteCheckedException {
-            FileSystemConfiguration igfsCfg = super.getIgfsConfiguration();
-
-            igfsCfg.setName("igfs-secondary");
-            igfsCfg.setDefaultMode(PRIMARY);
-
-            IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
-
-            endpointCfg.setType(IgfsIpcEndpointType.TCP);
-            endpointCfg.setPort(11500);
-
-            igfsCfg.setIpcEndpointConfiguration(endpointCfg);
-
-            return igfsCfg;
-        }
-
-        /** {@inheritDoc} */
-        @Override protected void beforeTestsStarted() throws Exception {
-            igfsSec = startSecondary();
-
-            super.beforeTestsStarted();
-        }
-
-        /** {@inheritDoc} */
-        @Override protected void afterTestsStopped() throws Exception {
-            super.afterTestsStopped();
-
-            G.stopAll(true);
-        }
-
-        /** {@inheritDoc} */
-        @Override protected void afterTest() throws Exception {
-            super.afterTest();
-
-            // Clean up secondary file system.
-            igfsSec.format();
-        }
-
-        /**
-         * Start a grid with the secondary file system.
-         *
-         * @return Secondary file system handle.
-         * @throws Exception If failed.
-         */
-        @Nullable private IgniteFileSystem startSecondary() throws Exception {
-            IgniteConfiguration cfg = getConfiguration("grid-secondary", getSecondaryIgfsConfiguration());
-
-            cfg.setLocalHost("127.0.0.1");
-            cfg.setPeerClassLoadingEnabled(false);
-
-            Ignite secG = G.start(cfg);
-
-            return secG.fileSystem("igfs-secondary");
-        }
-    }
-
-    /**
-     * Shared memory IPC in DUAL_SYNC mode.
-     */
-    public static class ShmemDualSync extends PrimarySecondaryTest {
-        /** {@inheritDoc} */
-        @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException {
-            FileSystemConfiguration igfsCfg = super.getIgfsConfiguration();
-
-            igfsCfg.setDefaultMode(DUAL_SYNC);
-
-            return igfsCfg;
-        }
-    }
-
-    /**
-     * Shared memory IPC in DUAL_ASYNC mode.
-     */
-    public static class ShmemDualAsync extends PrimarySecondaryTest {
-        /** {@inheritDoc} */
-        @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException {
-            FileSystemConfiguration igfsCfg = super.getIgfsConfiguration();
-
-            igfsCfg.setDefaultMode(DUAL_ASYNC);
-
-            return igfsCfg;
-        }
-    }
-
-    /**
-     * Loopback socket IPC with secondary file system.
-     */
-    public abstract static class LoopbackPrimarySecondaryTest extends PrimarySecondaryTest {
-        /** {@inheritDoc} */
-        @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException {
-            FileSystemConfiguration igfsCfg = super.getIgfsConfiguration();
-
-            igfsCfg.setDefaultMode(IgfsMode.PRIMARY);
-
-            igfsCfg.setSecondaryFileSystem(new IgniteHadoopIgfsSecondaryFileSystem(
-                "igfs://igfs-secondary:grid-secondary@127.0.0.1:11500/",
-                "modules/core/src/test/config/hadoop/core-site-loopback-secondary.xml"));
-
-            return igfsCfg;
-        }
-
-        /** {@inheritDoc} */
-        @Override protected FileSystemConfiguration getSecondaryIgfsConfiguration() throws IgniteCheckedException {
-            FileSystemConfiguration igfsCfg = super.getSecondaryIgfsConfiguration();
-
-            igfsCfg.setName("igfs-secondary");
-            igfsCfg.setDefaultMode(PRIMARY);
-
-            IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
-
-            endpointCfg.setType(IgfsIpcEndpointType.TCP);
-            endpointCfg.setPort(11500);
-
-            igfsCfg.setIpcEndpointConfiguration(endpointCfg);
-
-            return igfsCfg;
-        }
-    }
-
-    /**
-     * Loopback IPC in DUAL_SYNC mode.
-     */
-    public static class LoopbackDualSync extends LoopbackPrimarySecondaryTest {
-        /** {@inheritDoc} */
-        @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException {
-            FileSystemConfiguration igfsCfg = super.getIgfsConfiguration();
-
-            igfsCfg.setDefaultMode(DUAL_SYNC);
-
-            return igfsCfg;
-        }
-    }
-
-    /**
-     * Loopback socket IPC in DUAL_ASYNC mode.
-     */
-    public static class LoopbackDualAsync extends LoopbackPrimarySecondaryTest {
-        /** {@inheritDoc} */
-        @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException {
-            FileSystemConfiguration igfsCfg = super.getIgfsConfiguration();
-
-            igfsCfg.setDefaultMode(DUAL_ASYNC);
-
-            return igfsCfg;
-        }
-    }
-}
\ No newline at end of file

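For completeness, these are JUnit 3 style suites, so they can also be launched outside the build with the text runner; a minimal sketch (the wrapper class is hypothetical):

    import junit.textui.TestRunner;
    import org.apache.ignite.igfs.IgfsEventsTestSuite;

    public class RunIgfsEventsSuite {
        public static void main(String[] args) throws Exception {
            // Full suite includes shmem tests; suiteNoarchOnly() is the portable subset.
            TestRunner.run(IgfsEventsTestSuite.suite());
        }
    }
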
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsNearOnlyMultiNodeSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsNearOnlyMultiNodeSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsNearOnlyMultiNodeSelfTest.java
deleted file mode 100644
index 8e79356..0000000
--- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsNearOnlyMultiNodeSelfTest.java
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.igfs;
-
-import java.io.OutputStream;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.Collection;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.ignite.cache.CacheWriteSynchronizationMode;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.configuration.FileSystemConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.configuration.NearCacheConfiguration;
-import org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.G;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lang.IgniteBiTuple;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
-import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
-
-import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
-import static org.apache.ignite.cache.CacheMode.PARTITIONED;
-import static org.apache.ignite.events.EventType.EVT_JOB_MAPPED;
-import static org.apache.ignite.events.EventType.EVT_TASK_FAILED;
-import static org.apache.ignite.events.EventType.EVT_TASK_FINISHED;
-
-/**
- * Tests Hadoop file system implementation.
- */
-public class IgfsNearOnlyMultiNodeSelfTest extends GridCommonAbstractTest {
-    /** Path to the default hadoop configuration. */
-    public static final String HADOOP_FS_CFG = "examples/config/filesystem/core-site.xml";
-
-    /** Group size. */
-    public static final int GRP_SIZE = 128;
-
-    /** IP finder. */
-    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
-
-    /** Node count. */
-    private int cnt;
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTestsStarted() throws Exception {
-        startGrids(nodeCount());
-
-        grid(0).createNearCache("data", new NearCacheConfiguration());
-
-        grid(0).createNearCache("meta", new NearCacheConfiguration());
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTestsStopped() throws Exception {
-        G.stopAll(true);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
-        IgniteConfiguration cfg = super.getConfiguration(gridName);
-
-        cfg.setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(IP_FINDER).setForceServerMode(true));
-
-        FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
-
-        igfsCfg.setDataCacheName("data");
-        igfsCfg.setMetaCacheName("meta");
-        igfsCfg.setName("igfs");
-
-        IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
-
-        endpointCfg.setType(IgfsIpcEndpointType.SHMEM);
-        endpointCfg.setPort(IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + cnt);
-
-        igfsCfg.setIpcEndpointConfiguration(endpointCfg);
-
-        igfsCfg.setBlockSize(512 * 1024); // Together with the group blocks mapper this will yield 64M per-node groups.
-
-        cfg.setFileSystemConfiguration(igfsCfg);
-
-        cfg.setCacheConfiguration(cacheConfiguration(gridName, "data"), cacheConfiguration(gridName, "meta"));
-
-        cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED);
-
-        if (cnt == 0)
-            cfg.setClientMode(true);
-
-        cnt++;
-
-        return cfg;
-    }
-
-    /** @return Node count for test. */
-    protected int nodeCount() {
-        return 4;
-    }
-
-    /**
-     * Gets cache configuration.
-     *
-     * @param gridName Grid name.
-     * @return Cache configuration.
-     */
-    protected CacheConfiguration cacheConfiguration(String gridName, String cacheName) {
-        CacheConfiguration cacheCfg = defaultCacheConfiguration();
-
-        cacheCfg.setName(cacheName);
-        cacheCfg.setCacheMode(PARTITIONED);
-        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
-        cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(GRP_SIZE));
-        cacheCfg.setBackups(0);
-        cacheCfg.setAtomicityMode(TRANSACTIONAL);
-
-        return cacheCfg;
-    }
-
-    /**
-     * Gets configuration of the concrete file system.
-     *
-     * @return Configuration of the concrete file system.
-     */
-    protected Configuration getFileSystemConfig() {
-        Configuration cfg = new Configuration();
-
-        cfg.addResource(U.resolveIgniteUrl(HADOOP_FS_CFG));
-
-        return cfg;
-    }
-
-    /**
-     * Gets file system URI.
-     *
-     * @param grid Grid index.
-     * @return File system URI.
-     */
-    protected URI getFileSystemURI(int grid) {
-        try {
-            return new URI("igfs://127.0.0.1:" + (IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + grid));
-        }
-        catch (URISyntaxException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    /** @throws Exception If failed. */
-    public void testContentsConsistency() throws Exception {
-        try (FileSystem fs = FileSystem.get(getFileSystemURI(0), getFileSystemConfig())) {
-            Collection<IgniteBiTuple<String, Long>> files = F.asList(
-                F.t("/dir1/dir2/file1", 1024L),
-                F.t("/dir1/dir2/file2", 8 * 1024L),
-                F.t("/dir1/file1", 1024 * 1024L),
-                F.t("/dir1/file2", 5 * 1024 * 1024L),
-                F.t("/file1", 64 * 1024L + 13),
-                F.t("/file2", 13L),
-                F.t("/file3", 123764L)
-            );
-
-            for (IgniteBiTuple<String, Long> file : files) {
-
-                info("Writing file: " + file.get1());
-
-                try (OutputStream os = fs.create(new Path(file.get1()), (short)3)) {
-                    byte[] data = new byte[file.get2().intValue()];
-
-                    data[0] = 25;
-                    data[data.length - 1] = 26;
-
-                    os.write(data);
-                }
-
-                info("Finished writing file: " + file.get1());
-            }
-
-            for (int i = 1; i < nodeCount(); i++) {
-
-                try (FileSystem nodeFs = FileSystem.get(getFileSystemURI(i), getFileSystemConfig())) {
-                    for (IgniteBiTuple<String, Long> file : files) {
-                        Path path = new Path(file.get1());
-
-                        // Read through the per-node file system to actually verify consistency across nodes.
-                        FileStatus fileStatus = nodeFs.getFileStatus(path);
-
-                        assertEquals(file.get2(), (Long)fileStatus.getLen());
-
-                        byte[] read = new byte[file.get2().intValue()];
-
-                        info("Reading file: " + path);
-
-                        try (FSDataInputStream in = nodeFs.open(path)) {
-                            in.readFully(read);
-
-                            assert read[0] == 25;
-                            assert read[read.length - 1] == 26;
-                        }
-
-                        info("Finished reading file: " + path);
-                    }
-                }
-            }
-        }
-    }
-}
\ No newline at end of file

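The consistency test above uses a simple marker-byte pattern: write a known byte at both ends of each file via one node, then read the full contents back through every other node. Stripped of the grid setup, the pattern looks roughly like this sketch (class and method names are illustrative, not part of the test):

    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class MarkerByteCheck {
        /** Writes a file of the given size with marker bytes at both ends. */
        static void writeMarked(FileSystem fs, Path path, int size) throws Exception {
            byte[] data = new byte[size];

            data[0] = 25;
            data[size - 1] = 26;

            try (FSDataOutputStream os = fs.create(path, true)) {
                os.write(data);
            }
        }

        /** Reads the file back and verifies both markers survived. */
        static void verifyMarked(FileSystem fs, Path path, int size) throws Exception {
            byte[] read = new byte[size];

            try (FSDataInputStream in = fs.open(path)) {
                in.readFully(read);
            }

            if (read[0] != 25 || read[size - 1] != 26)
                throw new AssertionError("Marker byte mismatch: " + path);
        }
    }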

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsUtils.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsUtils.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsUtils.java
deleted file mode 100644
index fa5cbc5..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsUtils.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.igfs;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.AbstractFileSystem;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathExistsException;
-import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.igfs.IgfsDirectoryNotEmptyException;
-import org.apache.ignite.igfs.IgfsParentNotDirectoryException;
-import org.apache.ignite.igfs.IgfsPathAlreadyExistsException;
-import org.apache.ignite.igfs.IgfsPathNotFoundException;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Utility constants and methods for IGFS Hadoop file system.
- */
-public class HadoopIgfsUtils {
-    /** Parameter name for endpoint no embed mode flag. */
-    public static final String PARAM_IGFS_ENDPOINT_NO_EMBED = "fs.igfs.%s.endpoint.no_embed";
-
-    /** Parameter name for endpoint no shared memory flag. */
-    public static final String PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM = "fs.igfs.%s.endpoint.no_local_shmem";
-
-    /** Parameter name for endpoint no local TCP flag. */
-    public static final String PARAM_IGFS_ENDPOINT_NO_LOCAL_TCP = "fs.igfs.%s.endpoint.no_local_tcp";
-
-    /**
-     * Get string parameter.
-     *
-     * @param cfg Configuration.
-     * @param name Parameter name.
-     * @param authority Authority.
-     * @param dflt Default value.
-     * @return String value.
-     */
-    public static String parameter(Configuration cfg, String name, String authority, String dflt) {
-        return cfg.get(String.format(name, authority != null ? authority : ""), dflt);
-    }
-
-    /**
-     * Get integer parameter.
-     *
-     * @param cfg Configuration.
-     * @param name Parameter name.
-     * @param authority Authority.
-     * @param dflt Default value.
-     * @return Integer value.
-     * @throws IOException In case of parse exception.
-     */
-    public static int parameter(Configuration cfg, String name, String authority, int dflt) throws IOException {
-        String name0 = String.format(name, authority != null ? authority : "");
-
-        try {
-            return cfg.getInt(name0, dflt);
-        }
-        catch (NumberFormatException ignore) {
-            throw new IOException("Failed to parse parameter value to integer: " + name0);
-        }
-    }
-
-    /**
-     * Get boolean parameter.
-     *
-     * @param cfg Configuration.
-     * @param name Parameter name.
-     * @param authority Authority.
-     * @param dflt Default value.
-     * @return Boolean value.
-     */
-    public static boolean parameter(Configuration cfg, String name, String authority, boolean dflt) {
-        return cfg.getBoolean(String.format(name, authority != null ? authority : ""), dflt);
-    }
-
-    /**
-     * Cast Ignite exception to appropriate IO exception.
-     *
-     * @param e Exception to cast.
-     * @return Resulting IO exception.
-     */
-    public static IOException cast(IgniteCheckedException e) {
-        return cast(e, null);
-    }
-
-    /**
-     * Cast Ignite exception to appropriate IO exception.
-     *
-     * @param e Exception to cast.
-     * @param path Path for exceptions.
-     * @return Resulting IO exception.
-     */
-    @SuppressWarnings("unchecked")
-    public static IOException cast(IgniteCheckedException e, @Nullable String path) {
-        assert e != null;
-
-        // First check for any nested IOException; if exists - re-throw it.
-        if (e.hasCause(IOException.class))
-            return e.getCause(IOException.class);
-        else if (e.hasCause(IgfsPathNotFoundException.class))
-            return new FileNotFoundException(path); // TODO: Or PathNotFoundException?
-        else if (e.hasCause(IgfsParentNotDirectoryException.class))
-            return new ParentNotDirectoryException(path);
-        else if (path != null && e.hasCause(IgfsDirectoryNotEmptyException.class))
-            return new PathIsNotEmptyDirectoryException(path);
-        else if (path != null && e.hasCause(IgfsPathAlreadyExistsException.class))
-            return new PathExistsException(path);
-        else {
-            String msg = e.getMessage();
-
-            return msg == null ? new IOException(e) : new IOException(msg, e);
-        }
-    }
-
-    /**
-     * Deletes all files from the given file system.
-     *
-     * @param fs The file system to clean up.
-     * @throws IOException On error.
-     */
-    public static void clear(FileSystem fs) throws IOException {
-        // Delete root contents:
-        FileStatus[] statuses = fs.listStatus(new Path("/"));
-
-        if (statuses != null) {
-            for (FileStatus stat: statuses)
-                fs.delete(stat.getPath(), true);
-        }
-    }
-
-    /**
-     * Deletes all files from the given file system.
-     *
-     * @param fs The file system to clean up.
-     * @throws IOException On error.
-     */
-    public static void clear(AbstractFileSystem fs) throws IOException {
-        // Delete root contents:
-        FileStatus[] statuses = fs.listStatus(new Path("/"));
-
-        if (statuses != null) {
-            for (FileStatus stat: statuses)
-                fs.delete(stat.getPath(), true);
-        }
-    }
-
-    /**
-     * Constructor.
-     */
-    private HadoopIgfsUtils() {
-        // No-op.
-    }
-}
\ No newline at end of file

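The PARAM_IGFS_* constants above are format strings: parameter() substitutes the URI authority for the '%s', so each endpoint gets its own property namespace inside one Hadoop Configuration. A small sketch of that resolution (the authority value is a made-up example):

    import org.apache.hadoop.conf.Configuration;

    public class ParamResolutionSketch {
        public static void main(String[] args) {
            Configuration cfg = new Configuration();

            String authority = "igfs:grid@localhost:10500"; // Hypothetical authority.

            // Disable embedded mode for this one endpoint only.
            cfg.setBoolean(String.format("fs.igfs.%s.endpoint.no_embed", authority), true);

            // Mirrors HadoopIgfsUtils.parameter(cfg, PARAM_IGFS_ENDPOINT_NO_EMBED, authority, false).
            boolean noEmbed = cfg.getBoolean(
                String.format("fs.igfs.%s.endpoint.no_embed", authority), false);

            System.out.println("no_embed for " + authority + ": " + noEmbed); // Prints true.
        }
    }
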
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsWrapper.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsWrapper.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsWrapper.java
deleted file mode 100644
index f4ee97f..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsWrapper.java
+++ /dev/null
@@ -1,552 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.igfs;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteFileSystem;
-import org.apache.ignite.IgniteIllegalStateException;
-import org.apache.ignite.Ignition;
-import org.apache.ignite.igfs.IgfsBlockLocation;
-import org.apache.ignite.igfs.IgfsFile;
-import org.apache.ignite.igfs.IgfsPath;
-import org.apache.ignite.igfs.IgfsPathSummary;
-import org.apache.ignite.internal.processors.igfs.IgfsEx;
-import org.apache.ignite.internal.processors.igfs.IgfsHandshakeResponse;
-import org.apache.ignite.internal.processors.igfs.IgfsStatus;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.X;
-import org.apache.ignite.internal.util.typedef.internal.SB;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.jetbrains.annotations.Nullable;
-
-import static org.apache.ignite.IgniteState.STARTED;
-import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsEndpoint.LOCALHOST;
-import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_EMBED;
-import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM;
-import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_LOCAL_TCP;
-import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.parameter;
-
-/**
- * Wrapper for IGFS server.
- */
-public class HadoopIgfsWrapper implements HadoopIgfs {
-    /** Delegate. */
-    private final AtomicReference<Delegate> delegateRef = new AtomicReference<>();
-
-    /** Authority. */
-    private final String authority;
-
-    /** Connection string. */
-    private final HadoopIgfsEndpoint endpoint;
-
-    /** Log directory. */
-    private final String logDir;
-
-    /** Configuration. */
-    private final Configuration conf;
-
-    /** Logger. */
-    private final Log log;
-
-    /** The user name this wrapper works on behalf of. */
-    private final String userName;
-
-    /**
-     * Constructor.
-     *
-     * @param authority Authority (connection string).
-     * @param logDir Log directory for server.
-     * @param conf Configuration.
-     * @param log Current logger.
-     * @param user User name this wrapper works on behalf of.
-     * @throws IOException If failed to parse the endpoint.
-     */
-    public HadoopIgfsWrapper(String authority, String logDir, Configuration conf, Log log, String user)
-        throws IOException {
-        try {
-            this.authority = authority;
-            this.endpoint = new HadoopIgfsEndpoint(authority);
-            this.logDir = logDir;
-            this.conf = conf;
-            this.log = log;
-            this.userName = user;
-        }
-        catch (IgniteCheckedException e) {
-            throw new IOException("Failed to parse endpoint: " + authority, e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsHandshakeResponse handshake(String logDir) throws IOException {
-        return withReconnectHandling(new FileSystemClosure<IgfsHandshakeResponse>() {
-            @Override public IgfsHandshakeResponse apply(HadoopIgfsEx hadoop,
-                IgfsHandshakeResponse hndResp) {
-                return hndResp;
-            }
-        });
-    }
-
-    /** {@inheritDoc} */
-    @Override public void close(boolean force) {
-        Delegate delegate = delegateRef.get();
-
-        if (delegate != null && delegateRef.compareAndSet(delegate, null))
-            delegate.close(force);
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsFile info(final IgfsPath path) throws IOException {
-        return withReconnectHandling(new FileSystemClosure<IgfsFile>() {
-            @Override public IgfsFile apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp)
-                throws IgniteCheckedException, IOException {
-                return hadoop.info(path);
-            }
-        }, path);
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsFile update(final IgfsPath path, final Map<String, String> props) throws IOException {
-        return withReconnectHandling(new FileSystemClosure<IgfsFile>() {
-            @Override public IgfsFile apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp)
-                throws IgniteCheckedException, IOException {
-                return hadoop.update(path, props);
-            }
-        }, path);
-    }
-
-    /** {@inheritDoc} */
-    @Override public Boolean setTimes(final IgfsPath path, final long accessTime, final long modificationTime)
-        throws IOException {
-        return withReconnectHandling(new FileSystemClosure<Boolean>() {
-            @Override public Boolean apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp)
-                throws IgniteCheckedException, IOException {
-                return hadoop.setTimes(path, accessTime, modificationTime);
-            }
-        }, path);
-    }
-
-    /** {@inheritDoc} */
-    @Override public Boolean rename(final IgfsPath src, final IgfsPath dest) throws IOException {
-        return withReconnectHandling(new FileSystemClosure<Boolean>() {
-            @Override public Boolean apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp)
-                throws IgniteCheckedException, IOException {
-                return hadoop.rename(src, dest);
-            }
-        }, src);
-    }
-
-    /** {@inheritDoc} */
-    @Override public Boolean delete(final IgfsPath path, final boolean recursive) throws IOException {
-        return withReconnectHandling(new FileSystemClosure<Boolean>() {
-            @Override public Boolean apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp)
-                throws IgniteCheckedException, IOException {
-                return hadoop.delete(path, recursive);
-            }
-        }, path);
-    }
-
-    /** {@inheritDoc} */
-    @Override public Collection<IgfsBlockLocation> affinity(final IgfsPath path, final long start,
-        final long len) throws IOException {
-        return withReconnectHandling(new FileSystemClosure<Collection<IgfsBlockLocation>>() {
-            @Override public Collection<IgfsBlockLocation> apply(HadoopIgfsEx hadoop,
-                IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException {
-                return hadoop.affinity(path, start, len);
-            }
-        }, path);
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsPathSummary contentSummary(final IgfsPath path) throws IOException {
-        return withReconnectHandling(new FileSystemClosure<IgfsPathSummary>() {
-            @Override public IgfsPathSummary apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp)
-                throws IgniteCheckedException, IOException {
-                return hadoop.contentSummary(path);
-            }
-        }, path);
-    }
-
-    /** {@inheritDoc} */
-    @Override public Boolean mkdirs(final IgfsPath path, final Map<String, String> props) throws IOException {
-        return withReconnectHandling(new FileSystemClosure<Boolean>() {
-            @Override public Boolean apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp)
-                throws IgniteCheckedException, IOException {
-                return hadoop.mkdirs(path, props);
-            }
-        }, path);
-    }
-
-    /** {@inheritDoc} */
-    @Override public Collection<IgfsFile> listFiles(final IgfsPath path) throws IOException {
-        return withReconnectHandling(new FileSystemClosure<Collection<IgfsFile>>() {
-            @Override public Collection<IgfsFile> apply(HadoopIgfsEx hadoop,
-                IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException {
-                return hadoop.listFiles(path);
-            }
-        }, path);
-    }
-
-    /** {@inheritDoc} */
-    @Override public Collection<IgfsPath> listPaths(final IgfsPath path) throws IOException {
-        return withReconnectHandling(new FileSystemClosure<Collection<IgfsPath>>() {
-            @Override public Collection<IgfsPath> apply(HadoopIgfsEx hadoop,
-                IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException {
-                return hadoop.listPaths(path);
-            }
-        }, path);
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsStatus fsStatus() throws IOException {
-        return withReconnectHandling(new FileSystemClosure<IgfsStatus>() {
-            @Override public IgfsStatus apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp)
-                throws IgniteCheckedException, IOException {
-                return hadoop.fsStatus();
-            }
-        });
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopIgfsStreamDelegate open(final IgfsPath path) throws IOException {
-        return withReconnectHandling(new FileSystemClosure<HadoopIgfsStreamDelegate>() {
-            @Override public HadoopIgfsStreamDelegate apply(HadoopIgfsEx hadoop,
-                IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException {
-                return hadoop.open(path);
-            }
-        }, path);
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopIgfsStreamDelegate open(final IgfsPath path, final int seqReadsBeforePrefetch)
-        throws IOException {
-        return withReconnectHandling(new FileSystemClosure<HadoopIgfsStreamDelegate>() {
-            @Override public HadoopIgfsStreamDelegate apply(HadoopIgfsEx hadoop,
-                IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException {
-                return hadoop.open(path, seqReadsBeforePrefetch);
-            }
-        }, path);
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopIgfsStreamDelegate create(final IgfsPath path, final boolean overwrite,
-        final boolean colocate, final int replication, final long blockSize, @Nullable final Map<String, String> props)
-        throws IOException {
-        return withReconnectHandling(new FileSystemClosure<HadoopIgfsStreamDelegate>() {
-            @Override public HadoopIgfsStreamDelegate apply(HadoopIgfsEx hadoop,
-                IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException {
-                return hadoop.create(path, overwrite, colocate, replication, blockSize, props);
-            }
-        }, path);
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopIgfsStreamDelegate append(final IgfsPath path, final boolean create,
-        @Nullable final Map<String, String> props) throws IOException {
-        return withReconnectHandling(new FileSystemClosure<HadoopIgfsStreamDelegate>() {
-            @Override public HadoopIgfsStreamDelegate apply(HadoopIgfsEx hadoop,
-                IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException {
-                return hadoop.append(path, create, props);
-            }
-        }, path);
-    }
-
-    /**
-     * Execute closure which is not path-specific.
-     *
-     * @param clo Closure.
-     * @return Result.
-     * @throws IOException If failed.
-     */
-    private <T> T withReconnectHandling(FileSystemClosure<T> clo) throws IOException {
-        return withReconnectHandling(clo, null);
-    }
-
-    /**
-     * Execute closure.
-     *
-     * @param clo Closure.
-     * @param path Path for exceptions.
-     * @return Result.
-     * @throws IOException If failed.
-     */
-    private <T> T withReconnectHandling(final FileSystemClosure<T> clo, @Nullable IgfsPath path)
-        throws IOException {
-        Exception err = null;
-
-        for (int i = 0; i < 2; i++) {
-            Delegate curDelegate = null;
-
-            boolean close = false;
-            boolean force = false;
-
-            try {
-                curDelegate = delegate();
-
-                assert curDelegate != null;
-
-                close = curDelegate.doomed;
-
-                return clo.apply(curDelegate.hadoop, curDelegate.hndResp);
-            }
-            catch (HadoopIgfsCommunicationException e) {
-                if (curDelegate != null && !curDelegate.doomed) {
-                    // Try getting rid of the faulty delegate ASAP.
-                    delegateRef.compareAndSet(curDelegate, null);
-
-                    close = true;
-                    force = true;
-                }
-
-                if (log.isDebugEnabled())
-                    log.debug("Failed to send message to a server: " + e);
-
-                err = e;
-            }
-            catch (IgniteCheckedException e) {
-                throw HadoopIgfsUtils.cast(e, path != null ? path.toString() : null);
-            }
-            finally {
-                if (close) {
-                    assert curDelegate != null;
-
-                    curDelegate.close(force);
-                }
-            }
-        }
-
-        List<Throwable> list = X.getThrowableList(err);
-
-        Throwable cause = list.get(list.size() - 1);
-
-        throw new IOException("Failed to communicate with IGFS: "
-            + (cause.getMessage() == null ? cause.toString() : cause.getMessage()), err);
-    }
-
-    /**
-     * Get delegate creating it if needed.
-     *
-     * @return Delegate.
-     */
-    private Delegate delegate() throws HadoopIgfsCommunicationException {
-        // These fields will contain possible exceptions from shmem and TCP endpoints.
-        Exception errShmem = null;
-        Exception errTcp = null;
-
-        // 1. If delegate is set, return it immediately.
-        Delegate curDelegate = delegateRef.get();
-
-        if (curDelegate != null)
-            return curDelegate;
-
-        // 2. Guess that we are in the same VM.
-        boolean skipInProc = parameter(conf, PARAM_IGFS_ENDPOINT_NO_EMBED, authority, false);
-
-        if (!skipInProc) {
-            IgfsEx igfs = getIgfsEx(endpoint.grid(), endpoint.igfs());
-
-            if (igfs != null) {
-                HadoopIgfsEx hadoop = null;
-
-                try {
-                    hadoop = new HadoopIgfsInProc(igfs, log, userName);
-
-                    curDelegate = new Delegate(hadoop, hadoop.handshake(logDir));
-                }
-                catch (IOException | IgniteCheckedException e) {
-                    if (e instanceof HadoopIgfsCommunicationException && hadoop != null)
-                        hadoop.close(true);
-
-                    if (log.isDebugEnabled())
-                        log.debug("Failed to connect to in-process IGFS, fallback to IPC mode.", e);
-                }
-            }
-        }
-
-        // 3. Try connecting using shmem.
-        boolean skipLocShmem = parameter(conf, PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM, authority, false);
-
-        if (curDelegate == null && !skipLocShmem && !U.isWindows()) {
-            HadoopIgfsEx hadoop = null;
-
-            try {
-                hadoop = new HadoopIgfsOutProc(endpoint.port(), endpoint.grid(), endpoint.igfs(), log, userName);
-
-                curDelegate = new Delegate(hadoop, hadoop.handshake(logDir));
-            }
-            catch (IOException | IgniteCheckedException e) {
-                if (e instanceof HadoopIgfsCommunicationException && hadoop != null)
-                    hadoop.close(true);
-
-                if (log.isDebugEnabled())
-                    log.debug("Failed to connect to IGFS using shared memory [port=" + endpoint.port() + ']', e);
-
-                errShmem = e;
-            }
-        }
-
-        // 4. Try local TCP connection.
-        boolean skipLocTcp = parameter(conf, PARAM_IGFS_ENDPOINT_NO_LOCAL_TCP, authority, false);
-
-        if (curDelegate == null && !skipLocTcp) {
-            HadoopIgfsEx hadoop = null;
-
-            try {
-                hadoop = new HadoopIgfsOutProc(LOCALHOST, endpoint.port(), endpoint.grid(), endpoint.igfs(),
-                    log, userName);
-
-                curDelegate = new Delegate(hadoop, hadoop.handshake(logDir));
-            }
-            catch (IOException | IgniteCheckedException e) {
-                if (e instanceof HadoopIgfsCommunicationException && hadoop != null)
-                    hadoop.close(true);
-
-                if (log.isDebugEnabled())
-                    log.debug("Failed to connect to IGFS using TCP [host=" + endpoint.host() +
-                        ", port=" + endpoint.port() + ']', e);
-
-                errTcp = e;
-            }
-        }
-
-        // 5. Try remote TCP connection.
-        if (curDelegate == null && (skipLocTcp || !F.eq(LOCALHOST, endpoint.host()))) {
-            HadoopIgfsEx hadoop = null;
-
-            try {
-                hadoop = new HadoopIgfsOutProc(endpoint.host(), endpoint.port(), endpoint.grid(), endpoint.igfs(),
-                    log, userName);
-
-                curDelegate = new Delegate(hadoop, hadoop.handshake(logDir));
-            }
-            catch (IOException | IgniteCheckedException e) {
-                if (e instanceof HadoopIgfsCommunicationException && hadoop != null)
-                    hadoop.close(true);
-
-                if (log.isDebugEnabled())
-                    log.debug("Failed to connect to IGFS using TCP [host=" + endpoint.host() +
-                        ", port=" + endpoint.port() + ']', e);
-
-                errTcp = e;
-            }
-        }
-
-        if (curDelegate != null) {
-            if (!delegateRef.compareAndSet(null, curDelegate))
-                curDelegate.doomed = true;
-
-            return curDelegate;
-        }
-        else {
-            SB errMsg = new SB("Failed to connect to IGFS [endpoint=igfs://" + authority + ", attempts=[");
-
-            if (errShmem != null)
-                errMsg.a("[type=SHMEM, port=" + endpoint.port() + ", err=" + errShmem + "], ");
-
-            errMsg.a("[type=TCP, host=" + endpoint.host() + ", port=" + endpoint.port() + ", err=" + errTcp + "]] ");
-
-            errMsg.a("(ensure that IGFS is running and have IPC endpoint enabled; ensure that " +
-                "ignite-shmem-1.0.0.jar is in Hadoop classpath if you use shared memory endpoint).");
-
-            throw new HadoopIgfsCommunicationException(errMsg.toString());
-        }
-    }
-
-    /**
-     * File system operation closure.
-     */
-    private static interface FileSystemClosure<T> {
-        /**
-         * Call closure body.
-         *
-         * @param hadoop RPC handler.
-         * @param hndResp Handshake response.
-         * @return Result.
-         * @throws IgniteCheckedException If failed.
-         * @throws IOException If failed.
-         */
-        public T apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException;
-    }
-
-    /**
-     * Delegate.
-     */
-    private static class Delegate {
-        /** RPC handler. */
-        private final HadoopIgfsEx hadoop;
-
-        /** Handshake response. */
-        private final IgfsHandshakeResponse hndResp;
-
-        /** Close guard. */
-        private final AtomicBoolean closeGuard = new AtomicBoolean();
-
-        /** Whether this delegate must be closed at the end of the next invocation. */
-        private boolean doomed;
-
-        /**
-         * Constructor.
-         *
-         * @param hadoop Hadoop.
-         * @param hndResp Handshake response.
-         */
-        private Delegate(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) {
-            this.hadoop = hadoop;
-            this.hndResp = hndResp;
-        }
-
-        /**
-         * Close underlying RPC handler.
-         *
-         * @param force Force flag.
-         */
-        private void close(boolean force) {
-            if (closeGuard.compareAndSet(false, true))
-                hadoop.close(force);
-        }
-    }
-
-    /**
-     * Helper method to find the IGFS instance of the given name in the given Ignite instance.
-     *
-     * @param gridName The name of the grid to check.
-     * @param igfsName The name of the IGFS.
-     * @return The file system instance, or null if not found.
-     */
-    private static IgfsEx getIgfsEx(@Nullable String gridName, @Nullable String igfsName) {
-        if (Ignition.state(gridName) == STARTED) {
-            try {
-                for (IgniteFileSystem fs : Ignition.ignite(gridName).fileSystems()) {
-                    if (F.eq(fs.name(), igfsName))
-                        return (IgfsEx)fs;
-                }
-            }
-            catch (IgniteIllegalStateException ignore) {
-                // May happen if the grid state has changed:
-            }
-        }
-
-        return null;
-    }
-}
\ No newline at end of file

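The delegate() method above is a fixed fallback chain: try in-process, then shared memory, then local TCP, then remote TCP, and cache the first delegate whose handshake succeeds. Reduced to its skeleton, the pattern might be sketched like this (ConnectFn and the candidate list are illustrative stand-ins, not Ignite API):

    import java.io.IOException;
    import java.util.List;
    import java.util.concurrent.atomic.AtomicReference;

    public class FallbackChainSketch<T> {
        /** Hypothetical connection attempt; throws IOException on failure. */
        interface ConnectFn<R> {
            R connect() throws IOException;
        }

        /** Cached delegate shared between callers. */
        private final AtomicReference<T> ref = new AtomicReference<>();

        /** Returns the cached connection or tries each candidate in order. */
        T acquire(List<ConnectFn<T>> candidates) throws IOException {
            T cur = ref.get();

            if (cur != null)
                return cur;

            IOException last = null;

            for (ConnectFn<T> fn : candidates) {
                try {
                    cur = fn.connect();

                    // First winner is cached; a racing loser would be 'doomed' in the original code.
                    ref.compareAndSet(null, cur);

                    return cur;
                }
                catch (IOException e) {
                    last = e; // Remember the failure and fall through to the next transport.
                }
            }

            throw new IOException("All endpoints failed.", last);
        }
    }
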
http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobMetadata.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobMetadata.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobMetadata.java
deleted file mode 100644
index 090b336..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobMetadata.java
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.hadoop.jobtracker;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.Collection;
-import java.util.Map;
-import java.util.UUID;
-import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
-import org.apache.ignite.internal.processors.hadoop.HadoopJobPhase;
-import org.apache.ignite.internal.processors.hadoop.HadoopMapReducePlan;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
-import org.apache.ignite.internal.processors.hadoop.counter.HadoopCountersImpl;
-import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;
-import org.apache.ignite.internal.util.tostring.GridToStringExclude;
-import org.apache.ignite.internal.util.tostring.GridToStringInclude;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-import static org.apache.ignite.internal.processors.hadoop.HadoopJobPhase.PHASE_SETUP;
-
-/**
- * Hadoop job metadata. Internal object used for distributed job state tracking.
- */
-public class HadoopJobMetadata implements Externalizable {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** Job ID. */
-    private HadoopJobId jobId;
-
-    /** Job info. */
-    private HadoopJobInfo jobInfo;
-
-    /** Node submitted job. */
-    private UUID submitNodeId;
-
-    /** Map-reduce plan. */
-    private HadoopMapReducePlan mrPlan;
-
-    /** Pending splits for which mapper should be executed. */
-    private Map<HadoopInputSplit, Integer> pendingSplits;
-
-    /** Pending reducers. */
-    private Collection<Integer> pendingReducers;
-
-    /** Reducers addresses. */
-    @GridToStringInclude
-    private Map<Integer, HadoopProcessDescriptor> reducersAddrs;
-
-    /** Job phase. */
-    private HadoopJobPhase phase = PHASE_SETUP;
-
-    /** Fail cause. */
-    @GridToStringExclude
-    private Throwable failCause;
-
-    /** Version. */
-    private long ver;
-
-    /** Job counters. */
-    private HadoopCounters counters = new HadoopCountersImpl();
-
-    /**
-     * Empty constructor required by {@link Externalizable}.
-     */
-    public HadoopJobMetadata() {
-        // No-op.
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param submitNodeId Submit node ID.
-     * @param jobId Job ID.
-     * @param jobInfo Job info.
-     */
-    public HadoopJobMetadata(UUID submitNodeId, HadoopJobId jobId, HadoopJobInfo jobInfo) {
-        this.jobId = jobId;
-        this.jobInfo = jobInfo;
-        this.submitNodeId = submitNodeId;
-    }
-
-    /**
-     * Copy constructor.
-     *
-     * @param src Metadata to copy.
-     */
-    public HadoopJobMetadata(HadoopJobMetadata src) {
-        // Make sure to preserve alphabetic order.
-        counters = src.counters;
-        failCause = src.failCause;
-        jobId = src.jobId;
-        jobInfo = src.jobInfo;
-        mrPlan = src.mrPlan;
-        pendingSplits = src.pendingSplits;
-        pendingReducers = src.pendingReducers;
-        phase = src.phase;
-        reducersAddrs = src.reducersAddrs;
-        submitNodeId = src.submitNodeId;
-        ver = src.ver + 1;
-    }
-
-    /**
-     * @return Submit node ID.
-     */
-    public UUID submitNodeId() {
-        return submitNodeId;
-    }
-
-    /**
-     * @param phase Job phase.
-     */
-    public void phase(HadoopJobPhase phase) {
-        this.phase = phase;
-    }
-
-    /**
-     * @return Job phase.
-     */
-    public HadoopJobPhase phase() {
-        return phase;
-    }
-
-    /**
-     * Gets reducers addresses for external execution.
-     *
-     * @return Reducers addresses.
-     */
-    public Map<Integer, HadoopProcessDescriptor> reducersAddresses() {
-        return reducersAddrs;
-    }
-
-    /**
-     * Sets reducers addresses for external execution.
-     *
-     * @param reducersAddrs Map of addresses.
-     */
-    public void reducersAddresses(Map<Integer, HadoopProcessDescriptor> reducersAddrs) {
-        this.reducersAddrs = reducersAddrs;
-    }
-
-    /**
-     * Sets collection of pending splits.
-     *
-     * @param pendingSplits Collection of pending splits.
-     */
-    public void pendingSplits(Map<HadoopInputSplit, Integer> pendingSplits) {
-        this.pendingSplits = pendingSplits;
-    }
-
-    /**
-     * Gets collection of pending splits.
-     *
-     * @return Collection of pending splits.
-     */
-    public Map<HadoopInputSplit, Integer> pendingSplits() {
-        return pendingSplits;
-    }
-
-    /**
-     * Sets collection of pending reducers.
-     *
-     * @param pendingReducers Collection of pending reducers.
-     */
-    public void pendingReducers(Collection<Integer> pendingReducers) {
-        this.pendingReducers = pendingReducers;
-    }
-
-    /**
-     * Gets collection of pending reducers.
-     *
-     * @return Collection of pending reducers.
-     */
-    public Collection<Integer> pendingReducers() {
-        return pendingReducers;
-    }
-
-    /**
-     * @return Job ID.
-     */
-    public HadoopJobId jobId() {
-        return jobId;
-    }
-
-    /**
-     * @param mrPlan Map-reduce plan.
-     */
-    public void mapReducePlan(HadoopMapReducePlan mrPlan) {
-        assert this.mrPlan == null : "Map-reduce plan can only be initialized once.";
-
-        this.mrPlan = mrPlan;
-    }
-
-    /**
-     * @return Map-reduce plan.
-     */
-    public HadoopMapReducePlan mapReducePlan() {
-        return mrPlan;
-    }
-
-    /**
-     * @return Job info.
-     */
-    public HadoopJobInfo jobInfo() {
-        return jobInfo;
-    }
-
-    /**
-     * Returns job counters.
-     *
-     * @return Collection of counters.
-     */
-    public HadoopCounters counters() {
-        return counters;
-    }
-
-    /**
-     * Sets counters.
-     *
-     * @param counters Collection of counters.
-     */
-    public void counters(HadoopCounters counters) {
-        this.counters = counters;
-    }
-
-    /**
-     * @param failCause Fail cause.
-     */
-    public void failCause(Throwable failCause) {
-        assert failCause != null;
-
-        if (this.failCause == null) // Keep the first error.
-            this.failCause = failCause;
-    }
-
-    /**
-     * @return Fail cause.
-     */
-    public Throwable failCause() {
-        return failCause;
-    }
-
-    /**
-     * @return Version.
-     */
-    public long version() {
-        return ver;
-    }
-
-    /**
-     * @param split Split.
-     * @return Task number.
-     */
-    public int taskNumber(HadoopInputSplit split) {
-        return pendingSplits.get(split);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        U.writeUuid(out, submitNodeId);
-        out.writeObject(jobId);
-        out.writeObject(jobInfo);
-        out.writeObject(mrPlan);
-        out.writeObject(pendingSplits);
-        out.writeObject(pendingReducers);
-        out.writeObject(phase);
-        out.writeObject(failCause);
-        out.writeLong(ver);
-        out.writeObject(reducersAddrs);
-        out.writeObject(counters);
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        submitNodeId = U.readUuid(in);
-        jobId = (HadoopJobId)in.readObject();
-        jobInfo = (HadoopJobInfo)in.readObject();
-        mrPlan = (HadoopMapReducePlan)in.readObject();
-        pendingSplits = (Map<HadoopInputSplit,Integer>)in.readObject();
-        pendingReducers = (Collection<Integer>)in.readObject();
-        phase = (HadoopJobPhase)in.readObject();
-        failCause = (Throwable)in.readObject();
-        ver = in.readLong();
-        reducersAddrs = (Map<Integer, HadoopProcessDescriptor>)in.readObject();
-        counters = (HadoopCounters)in.readObject();
-    }
-
-    /** {@inheritDoc} */
-    public String toString() {
-        return S.toString(HadoopJobMetadata.class, this, "pendingMaps", pendingSplits.size(),
-            "pendingReduces", pendingReducers.size(), "failCause", failCause == null ? null :
-                failCause.getClass().getName());
-    }
-}
\ No newline at end of file
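
A side note on the copy constructor above: every copy increments the version field, so
updates can be applied copy-on-write and stale metadata detected by version comparison.
A minimal sketch of the pattern, with hypothetical names standing in for the real class:

    /** Copy-then-bump-version sketch (names are hypothetical, not from the patch). */
    public class MetaVersionDemo {
        static final class Meta {
            final long ver;
            final String phase;

            Meta(long ver, String phase) {
                this.ver = ver;
                this.phase = phase;
            }

            /** Copy constructor: carries state over and bumps the version. */
            Meta(Meta src, String newPhase) {
                ver = src.ver + 1;
                phase = newPhase;
            }
        }

        public static void main(String[] args) {
            Meta m0 = new Meta(0, "PHASE_SETUP");
            Meta m1 = new Meta(m0, "PHASE_MAP");

            // Readers holding m0 keep a consistent snapshot; writers compare versions.
            System.out.println(m0.ver + " -> " + m1.ver); // prints: 0 -> 1
        }
    }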


http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Job.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Job.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Job.java
new file mode 100644
index 0000000..595474c
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Job.java
@@ -0,0 +1,445 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v2;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Queue;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentLinkedDeque;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ConcurrentMap;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobContextImpl;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapreduce.JobSubmissionFiles;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.split.JobSplit;
+import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.internal.processors.hadoop.HadoopClassLoader;
+import org.apache.ignite.internal.processors.hadoop.HadoopDefaultJobInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopFileBlock;
+import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
+import org.apache.ignite.internal.processors.hadoop.HadoopJob;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskType;
+import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
+import org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemsUtils;
+import org.apache.ignite.internal.processors.hadoop.fs.HadoopLazyConcurrentMap;
+import org.apache.ignite.internal.processors.hadoop.v1.HadoopV1Splitter;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.T2;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.jetbrains.annotations.Nullable;
+import org.jsr166.ConcurrentHashMap8;
+
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.jobLocalDir;
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.taskLocalDir;
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.transformException;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemCacheUtils.FsCacheKey;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemCacheUtils.createHadoopLazyConcurrentMap;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemCacheUtils.fileSystemForMrUserWithCaching;
+
+/**
+ * Hadoop job implementation for v2 API.
+ */
+public class HadoopV2Job implements HadoopJob {
+    /** */
+    private final JobConf jobConf;
+
+    /** */
+    private final JobContextImpl jobCtx;
+
+    /** Hadoop job ID. */
+    private final HadoopJobId jobId;
+
+    /** Job info. */
+    protected final HadoopJobInfo jobInfo;
+
+    /** Native library names. */
+    private final String[] libNames;
+
+    /** */
+    private final JobID hadoopJobID;
+
+    /** */
+    private final HadoopV2JobResourceManager rsrcMgr;
+
+    /** */
+    private final ConcurrentMap<T2<HadoopTaskType, Integer>, GridFutureAdapter<HadoopTaskContext>> ctxs =
+        new ConcurrentHashMap8<>();
+
+    /** Pool of task context classes (and thus of class loading environments). */
+    private final Queue<Class<? extends HadoopTaskContext>> taskCtxClsPool = new ConcurrentLinkedQueue<>();
+
+    /** All created task context classes. */
+    private final Queue<Class<? extends HadoopTaskContext>> fullCtxClsQueue = new ConcurrentLinkedDeque<>();
+
+    /** File system cache map. */
+    private final HadoopLazyConcurrentMap<FsCacheKey, FileSystem> fsMap = createHadoopLazyConcurrentMap();
+
+    /** Local node ID. */
+    private volatile UUID locNodeId;
+
+    /** Serialized JobConf. */
+    private volatile byte[] jobConfData;
+
+    /**
+     * Constructor.
+     *
+     * @param jobId Job ID.
+     * @param jobInfo Job info.
+     * @param log Logger.
+     * @param libNames Optional additional native library names.
+     */
+    public HadoopV2Job(HadoopJobId jobId, final HadoopDefaultJobInfo jobInfo, IgniteLogger log,
+        @Nullable String[] libNames) {
+        assert jobId != null;
+        assert jobInfo != null;
+
+        this.jobId = jobId;
+        this.jobInfo = jobInfo;
+        this.libNames = libNames;
+
+        ClassLoader oldLdr = HadoopUtils.setContextClassLoader(getClass().getClassLoader());
+
+        try {
+            hadoopJobID = new JobID(jobId.globalId().toString(), jobId.localId());
+
+            jobConf = new JobConf();
+
+            HadoopFileSystemsUtils.setupFileSystems(jobConf);
+
+            for (Map.Entry<String,String> e : jobInfo.properties().entrySet())
+                jobConf.set(e.getKey(), e.getValue());
+
+            jobCtx = new JobContextImpl(jobConf, hadoopJobID);
+
+            rsrcMgr = new HadoopV2JobResourceManager(jobId, jobCtx, log, this);
+        }
+        finally {
+            HadoopUtils.setContextClassLoader(oldLdr);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopJobId id() {
+        return jobId;
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopJobInfo info() {
+        return jobInfo;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Collection<HadoopInputSplit> input() throws IgniteCheckedException {
+        ClassLoader oldLdr = HadoopUtils.setContextClassLoader(jobConf.getClassLoader());
+
+        try {
+            String jobDirPath = jobConf.get(MRJobConfig.MAPREDUCE_JOB_DIR);
+
+            if (jobDirPath == null) { // The job was probably not submitted by a Hadoop client.
+                // Assume that the needed classes are available and try to generate input splits ourselves.
+                if (jobConf.getUseNewMapper())
+                    return HadoopV2Splitter.splitJob(jobCtx);
+                else
+                    return HadoopV1Splitter.splitJob(jobConf);
+            }
+
+            Path jobDir = new Path(jobDirPath);
+
+            try {
+                FileSystem fs = fileSystem(jobDir.toUri(), jobConf);
+
+                JobSplit.TaskSplitMetaInfo[] metaInfos = SplitMetaInfoReader.readSplitMetaInfo(hadoopJobID, fs, jobConf,
+                    jobDir);
+
+                if (F.isEmpty(metaInfos))
+                    throw new IgniteCheckedException("No input splits found.");
+
+                Path splitsFile = JobSubmissionFiles.getJobSplitFile(jobDir);
+
+                try (FSDataInputStream in = fs.open(splitsFile)) {
+                    Collection<HadoopInputSplit> res = new ArrayList<>(metaInfos.length);
+
+                    for (JobSplit.TaskSplitMetaInfo metaInfo : metaInfos) {
+                        long off = metaInfo.getStartOffset();
+
+                        String[] hosts = metaInfo.getLocations();
+
+                        in.seek(off);
+
+                        String clsName = Text.readString(in);
+
+                        HadoopFileBlock block = HadoopV1Splitter.readFileBlock(clsName, in, hosts);
+
+                        if (block == null)
+                            block = HadoopV2Splitter.readFileBlock(clsName, in, hosts);
+
+                        res.add(block != null ? block : new HadoopExternalSplit(hosts, off));
+                    }
+
+                    return res;
+                }
+            }
+            catch (Throwable e) {
+                if (e instanceof Error)
+                    throw (Error)e;
+                else
+                    throw transformException(e);
+            }
+        }
+        finally {
+            HadoopUtils.restoreContextClassLoader(oldLdr);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings({"unchecked", "MismatchedQueryAndUpdateOfCollection" })
+    @Override public HadoopTaskContext getTaskContext(HadoopTaskInfo info) throws IgniteCheckedException {
+        T2<HadoopTaskType, Integer> locTaskId = new T2<>(info.type(),  info.taskNumber());
+
+        GridFutureAdapter<HadoopTaskContext> fut = ctxs.get(locTaskId);
+
+        if (fut != null)
+            return fut.get();
+
+        GridFutureAdapter<HadoopTaskContext> old = ctxs.putIfAbsent(locTaskId, fut = new GridFutureAdapter<>());
+
+        if (old != null)
+            return old.get();
+
+        Class<? extends HadoopTaskContext> cls = taskCtxClsPool.poll();
+
+        try {
+            if (cls == null) {
+                // If there is no pooled class, then load a new one.
+                // Note that the class loader is named after the task it was initially created for,
+                // but later it may be reused for other tasks.
+                HadoopClassLoader ldr = new HadoopClassLoader(rsrcMgr.classPath(),
+                    HadoopClassLoader.nameForTask(info, false), libNames);
+
+                cls = (Class<? extends HadoopTaskContext>)ldr.loadClass(HadoopV2TaskContext.class.getName());
+
+                fullCtxClsQueue.add(cls);
+            }
+
+            Constructor<?> ctr = cls.getConstructor(HadoopTaskInfo.class, HadoopJob.class,
+                HadoopJobId.class, UUID.class, DataInput.class);
+
+            if (jobConfData == null)
+                synchronized(jobConf) {
+                    if (jobConfData == null) {
+                        ByteArrayOutputStream buf = new ByteArrayOutputStream();
+
+                        jobConf.write(new DataOutputStream(buf));
+
+                        jobConfData = buf.toByteArray();
+                    }
+                }
+
+            HadoopTaskContext res = (HadoopTaskContext)ctr.newInstance(info, this, jobId, locNodeId,
+                new DataInputStream(new ByteArrayInputStream(jobConfData)));
+
+            fut.onDone(res);
+
+            return res;
+        }
+        catch (Throwable e) {
+            IgniteCheckedException te = transformException(e);
+
+            fut.onDone(te);
+
+            if (e instanceof Error)
+                throw (Error)e;
+
+            throw te;
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void initialize(boolean external, UUID locNodeId) throws IgniteCheckedException {
+        assert locNodeId != null;
+
+        this.locNodeId = locNodeId;
+
+        ClassLoader oldLdr = HadoopUtils.setContextClassLoader(getClass().getClassLoader());
+
+        try {
+            rsrcMgr.prepareJobEnvironment(!external, jobLocalDir(locNodeId, jobId));
+        }
+        finally {
+            HadoopUtils.restoreContextClassLoader(oldLdr);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("ThrowFromFinallyBlock")
+    @Override public void dispose(boolean external) throws IgniteCheckedException {
+        try {
+            if (rsrcMgr != null && !external) {
+                File jobLocDir = jobLocalDir(locNodeId, jobId);
+
+                if (jobLocDir.exists())
+                    U.delete(jobLocDir);
+            }
+        }
+        finally {
+            taskCtxClsPool.clear();
+
+            Throwable err = null;
+
+            // Stop the daemon threads that have been created
+            // with the task class loaders:
+            while (true) {
+                Class<? extends HadoopTaskContext> cls = fullCtxClsQueue.poll();
+
+                if (cls == null)
+                    break;
+
+                try {
+                    final ClassLoader ldr = cls.getClassLoader();
+
+                    try {
+                        // Stop Hadoop daemons for this *task*:
+                        stopHadoopFsDaemons(ldr);
+                    }
+                    catch (Exception e) {
+                        if (err == null)
+                            err = e;
+                    }
+
+                    // Also close all the FileSystems cached in
+                    // HadoopLazyConcurrentMap for this *task* class loader:
+                    closeCachedTaskFileSystems(ldr);
+                }
+                catch (Throwable e) {
+                    if (err == null)
+                        err = e;
+
+                    if (e instanceof Error)
+                        throw (Error)e;
+                }
+            }
+
+            assert fullCtxClsQueue.isEmpty();
+
+            try {
+                // Close all cached file systems for this *Job*:
+                fsMap.close();
+            }
+            catch (Exception e) {
+                if (err == null)
+                    err = e;
+            }
+
+            if (err != null)
+                throw U.cast(err);
+        }
+    }
+
+    /**
+     * Stops Hadoop Fs daemon threads.
+     * @param ldr The task ClassLoader to stop the daemons for.
+     * @throws Exception On error.
+     */
+    private void stopHadoopFsDaemons(ClassLoader ldr) throws Exception {
+        Class<?> daemonCls = ldr.loadClass(HadoopClassLoader.CLS_DAEMON);
+
+        Method m = daemonCls.getMethod("dequeueAndStopAll");
+
+        m.invoke(null);
+    }
+
+    /**
+     * Closes all the file systems used by the task.
+     * @param ldr The task class loader.
+     * @throws Exception On error.
+     */
+    private void closeCachedTaskFileSystems(ClassLoader ldr) throws Exception {
+        Class<?> clazz = ldr.loadClass(HadoopV2TaskContext.class.getName());
+
+        Method m = clazz.getMethod("close");
+
+        m.invoke(null);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void prepareTaskEnvironment(HadoopTaskInfo info) throws IgniteCheckedException {
+        rsrcMgr.prepareTaskWorkDir(taskLocalDir(locNodeId, info));
+    }
+
+    /** {@inheritDoc} */
+    @Override public void cleanupTaskEnvironment(HadoopTaskInfo info) throws IgniteCheckedException {
+        HadoopTaskContext ctx = ctxs.remove(new T2<>(info.type(), info.taskNumber())).get();
+
+        taskCtxClsPool.add(ctx.getClass());
+
+        File locDir = taskLocalDir(locNodeId, info);
+
+        if (locDir.exists())
+            U.delete(locDir);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void cleanupStagingDirectory() {
+        rsrcMgr.cleanupStagingDirectory();
+    }
+
+    /**
+     * Getter for job configuration.
+     * @return The job configuration.
+     */
+    public JobConf jobConf() {
+        return jobConf;
+    }
+
+    /**
+     * Gets file system for this job.
+     * @param uri The uri.
+     * @param cfg The configuration.
+     * @return The file system.
+     * @throws IOException On error.
+     */
+    public FileSystem fileSystem(@Nullable URI uri, Configuration cfg) throws IOException {
+        return fileSystemForMrUserWithCaching(uri, cfg, fsMap);
+    }
+}
\ No newline at end of file
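
In getTaskContext() above, each pooled context class is loaded through a fresh
HadoopClassLoader, so every pooled entry carries its own class-loading environment and
therefore its own static state. The JVM behavior this relies on can be shown without any
Ignite code; a small sketch (assuming it is run from a plain class-path directory or jar):

    import java.net.URL;
    import java.net.URLClassLoader;

    public class LoaderIsolationDemo {
        public static void main(String[] args) throws Exception {
            URL cp = LoaderIsolationDemo.class.getProtectionDomain().getCodeSource().getLocation();

            // A sibling loader with no application parent re-reads the same class file.
            try (URLClassLoader ldr = new URLClassLoader(new URL[] {cp}, null)) {
                Class<?> reloaded = ldr.loadClass(LoaderIsolationDemo.class.getName());

                // Same binary name, distinct runtime class -> independent static fields.
                System.out.println(reloaded == LoaderIsolationDemo.class); // prints: false
            }
        }
    }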

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2JobResourceManager.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2JobResourceManager.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2JobResourceManager.java
new file mode 100644
index 0000000..33aef60
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2JobResourceManager.java
@@ -0,0 +1,323 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v2;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.file.FileSystemException;
+import java.nio.file.Files;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobContextImpl;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.util.RunJar;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
+import org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemsUtils;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Provides all resources needed for job execution: downloads the main jar and the configuration, and places
+ * additional required files on the local file system.
+ */
+class HadoopV2JobResourceManager {
+    /** File type Fs disable caching property name. */
+    private static final String FILE_DISABLE_CACHING_PROPERTY_NAME =
+        HadoopFileSystemsUtils.disableFsCachePropertyName("file");
+
+    /** Hadoop job context. */
+    private final JobContextImpl ctx;
+
+    /** Logger. */
+    private final IgniteLogger log;
+
+    /** Job ID. */
+    private final HadoopJobId jobId;
+
+    /** Class path list. */
+    private URL[] clsPath;
+
+    /** Set of local resources. */
+    private final Collection<File> rsrcSet = new HashSet<>();
+
+    /** Staging directory used to deliver the job jar and config to the worker nodes. */
+    private Path stagingDir;
+
+    /** The job. */
+    private final HadoopV2Job job;
+
+    /**
+     * Creates a new instance.
+     * @param jobId Job ID.
+     * @param ctx Hadoop job context.
+     * @param log Logger.
+     * @param job The job.
+     */
+    public HadoopV2JobResourceManager(HadoopJobId jobId, JobContextImpl ctx, IgniteLogger log, HadoopV2Job job) {
+        this.jobId = jobId;
+        this.ctx = ctx;
+        this.log = log.getLogger(HadoopV2JobResourceManager.class);
+        this.job = job;
+    }
+
+    /**
+     * Set working directory in local file system.
+     *
+     * @param dir Working directory.
+     * @throws IOException If fails.
+     */
+    private void setLocalFSWorkingDirectory(File dir) throws IOException {
+        JobConf cfg = ctx.getJobConf();
+
+        ClassLoader oldLdr = HadoopUtils.setContextClassLoader(cfg.getClassLoader());
+
+        try {
+            cfg.set(HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP, dir.getAbsolutePath());
+
+            if (!cfg.getBoolean(FILE_DISABLE_CACHING_PROPERTY_NAME, false))
+                FileSystem.getLocal(cfg).setWorkingDirectory(new Path(dir.getAbsolutePath()));
+        }
+        finally {
+            HadoopUtils.restoreContextClassLoader(oldLdr);
+        }
+    }
+
+    /**
+     * Prepares job resources: resolves the class path list and downloads resources if needed.
+     *
+     * @param download {@code true} if resources need to be downloaded.
+     * @param jobLocDir Work directory for the job.
+     * @throws IgniteCheckedException If failed.
+     */
+    public void prepareJobEnvironment(boolean download, File jobLocDir) throws IgniteCheckedException {
+        try {
+            if (jobLocDir.exists())
+                throw new IgniteCheckedException("Local job directory already exists: " + jobLocDir.getAbsolutePath());
+
+            JobConf cfg = ctx.getJobConf();
+
+            String mrDir = cfg.get("mapreduce.job.dir");
+
+            if (mrDir != null) {
+                stagingDir = new Path(new URI(mrDir));
+
+                if (download) {
+                    FileSystem fs = job.fileSystem(stagingDir.toUri(), cfg);
+
+                    if (!fs.exists(stagingDir))
+                        throw new IgniteCheckedException("Failed to find map-reduce submission " +
+                            "directory (does not exist): " + stagingDir);
+
+                    if (!FileUtil.copy(fs, stagingDir, jobLocDir, false, cfg))
+                        throw new IgniteCheckedException("Failed to copy job submission directory "
+                            + "contents to local file system "
+                            + "[path=" + stagingDir + ", locDir=" + jobLocDir.getAbsolutePath()
+                            + ", jobId=" + jobId + ']');
+                }
+
+                File jarJobFile = new File(jobLocDir, "job.jar");
+
+                Collection<URL> clsPathUrls = new ArrayList<>();
+
+                clsPathUrls.add(jarJobFile.toURI().toURL());
+
+                rsrcSet.add(jarJobFile);
+                rsrcSet.add(new File(jobLocDir, "job.xml"));
+
+                processFiles(jobLocDir, ctx.getCacheFiles(), download, false, null, MRJobConfig.CACHE_LOCALFILES);
+                processFiles(jobLocDir, ctx.getCacheArchives(), download, true, null, MRJobConfig.CACHE_LOCALARCHIVES);
+                processFiles(jobLocDir, ctx.getFileClassPaths(), download, false, clsPathUrls, null);
+                processFiles(jobLocDir, ctx.getArchiveClassPaths(), download, true, clsPathUrls, null);
+
+                if (!clsPathUrls.isEmpty()) {
+                    clsPath = new URL[clsPathUrls.size()];
+
+                    clsPathUrls.toArray(clsPath);
+                }
+            }
+            else if (!jobLocDir.mkdirs())
+                throw new IgniteCheckedException("Failed to create local job directory: "
+                    + jobLocDir.getAbsolutePath());
+
+            setLocalFSWorkingDirectory(jobLocDir);
+        }
+        catch (URISyntaxException | IOException e) {
+            throw new IgniteCheckedException(e);
+        }
+    }
+
+    /**
+     * Process list of resources.
+     *
+     * @param jobLocDir Job working directory.
+     * @param files Array of {@link java.net.URI} or {@link org.apache.hadoop.fs.Path} resources to process.
+     * @param download {@code true} if resources need to be downloaded; otherwise only the class path is processed.
+     * @param extract {@code true} if archives need to be extracted.
+     * @param clsPathUrls Collection to which class path resource URLs are added.
+     * @param rsrcNameProp Configuration property to set with the resulting resource name list.
+     * @throws IOException If failed.
+     */
+    private void processFiles(File jobLocDir, @Nullable Object[] files, boolean download, boolean extract,
+        @Nullable Collection<URL> clsPathUrls, @Nullable String rsrcNameProp) throws IOException {
+        if (F.isEmptyOrNulls(files))
+            return;
+
+        Collection<String> res = new ArrayList<>();
+
+        for (Object pathObj : files) {
+            Path srcPath;
+
+            if (pathObj instanceof URI) {
+                URI uri = (URI)pathObj;
+
+                srcPath = new Path(uri);
+            }
+            else
+                srcPath = (Path)pathObj;
+
+            String locName = srcPath.getName();
+
+            File dstPath = new File(jobLocDir.getAbsolutePath(), locName);
+
+            res.add(locName);
+
+            rsrcSet.add(dstPath);
+
+            if (clsPathUrls != null)
+                clsPathUrls.add(dstPath.toURI().toURL());
+
+            if (!download)
+                continue;
+
+            JobConf cfg = ctx.getJobConf();
+
+            FileSystem dstFs = FileSystem.getLocal(cfg);
+
+            FileSystem srcFs = job.fileSystem(srcPath.toUri(), cfg);
+
+            if (extract) {
+                File archivesPath = new File(jobLocDir.getAbsolutePath(), ".cached-archives");
+
+                if (!archivesPath.exists() && !archivesPath.mkdir())
+                    throw new IOException("Failed to create directory " +
+                        "[path=" + archivesPath + ", jobId=" + jobId + ']');
+
+                File archiveFile = new File(archivesPath, locName);
+
+                FileUtil.copy(srcFs, srcPath, dstFs, new Path(archiveFile.toString()), false, cfg);
+
+                String archiveNameLC = archiveFile.getName().toLowerCase();
+
+                if (archiveNameLC.endsWith(".jar"))
+                    RunJar.unJar(archiveFile, dstPath);
+                else if (archiveNameLC.endsWith(".zip"))
+                    FileUtil.unZip(archiveFile, dstPath);
+                else if (archiveNameLC.endsWith(".tar.gz") ||
+                    archiveNameLC.endsWith(".tgz") ||
+                    archiveNameLC.endsWith(".tar"))
+                    FileUtil.unTar(archiveFile, dstPath);
+                else
+                    throw new IOException("Cannot unpack archive [path=" + srcPath + ", jobId=" + jobId + ']');
+            }
+            else
+                FileUtil.copy(srcFs, srcPath, dstFs, new Path(dstPath.toString()), false, cfg);
+        }
+
+        if (!res.isEmpty() && rsrcNameProp != null)
+            ctx.getJobConf().setStrings(rsrcNameProp, res.toArray(new String[res.size()]));
+    }
+
+    /**
+     * Prepares working directory for the task.
+     *
+     * <ul>
+     *     <li>Creates working directory.</li>
+     *     <li>Creates symbolic links to all job resources in working directory.</li>
+     * </ul>
+     *
+     * @param path Path to working directory of the task.
+     * @throws IgniteCheckedException If fails.
+     */
+    public void prepareTaskWorkDir(File path) throws IgniteCheckedException {
+        try {
+            if (path.exists())
+                throw new IOException("Task local directory already exists: " + path);
+
+            if (!path.mkdir())
+                throw new IOException("Failed to create directory: " + path);
+
+            for (File resource : rsrcSet) {
+                File symLink = new File(path, resource.getName());
+
+                try {
+                    Files.createSymbolicLink(symLink.toPath(), resource.toPath());
+                }
+                catch (IOException e) {
+                    String msg = "Unable to create symlink \"" + symLink + "\" to \"" + resource + "\".";
+
+                    if (U.isWindows() && e instanceof FileSystemException)
+                        msg += "\n\nAbility to create symbolic links is required!\n" +
+                                "On Windows platform you have to grant permission 'Create symbolic links'\n" +
+                                "to your user or run the Accelerator as Administrator.\n";
+
+                    throw new IOException(msg, e);
+                }
+            }
+        }
+        catch (IOException e) {
+            throw new IgniteCheckedException("Unable to prepare local working directory for the task " +
+                 "[jobId=" + jobId + ", path=" + path+ ']', e);
+        }
+    }
+
+    /**
+     * Cleans up job staging directory.
+     */
+    public void cleanupStagingDirectory() {
+        try {
+            if (stagingDir != null) {
+                FileSystem fs = job.fileSystem(stagingDir.toUri(), ctx.getJobConf());
+
+                fs.delete(stagingDir, true);
+            }
+        }
+        catch (Exception e) {
+            log.error("Failed to remove job staging directory [path=" + stagingDir + ", jobId=" + jobId + ']' , e);
+        }
+    }
+
+    /**
+     * Returns the class path array for the current job.
+     *
+     * @return Class path array, or {@code null} if none.
+     */
+    @Nullable public URL[] classPath() {
+        return clsPath;
+    }
+}
\ No newline at end of file
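
prepareTaskWorkDir() above links shared job resources into each task's working directory
rather than copying them. The same NIO call in isolation (temporary directories stand in
for the job and task dirs; as the error message above notes, Windows needs the
'Create symbolic links' permission for this to work):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public class TaskDirLinkDemo {
        public static void main(String[] args) throws IOException {
            Path jobDir = Files.createTempDirectory("job");          // stands in for the job-local dir
            Path rsrc = Files.createFile(jobDir.resolve("job.xml")); // a downloaded job resource

            Path taskDir = Files.createTempDirectory("task");        // per-task working directory

            // Link the shared resource into the task dir instead of copying it.
            Files.createSymbolicLink(taskDir.resolve(rsrc.getFileName()), rsrc);

            System.out.println(Files.isSymbolicLink(taskDir.resolve("job.xml"))); // prints: true
        }
    }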

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2MapTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2MapTask.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2MapTask.java
new file mode 100644
index 0000000..fafa79b
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2MapTask.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v2;
+
+import org.apache.hadoop.mapred.JobContextImpl;
+import org.apache.hadoop.mapreduce.InputFormat;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.OutputFormat;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.lib.map.WrappedMapper;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.IgniteInterruptedCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+
+/**
+ * Hadoop map task implementation for v2 API.
+ */
+public class HadoopV2MapTask extends HadoopV2Task {
+    /**
+     * @param taskInfo Task info.
+     */
+    public HadoopV2MapTask(HadoopTaskInfo taskInfo) {
+        super(taskInfo);
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings({"ConstantConditions", "unchecked"})
+    @Override public void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException {
+        OutputFormat outputFormat = null;
+        Exception err = null;
+
+        JobContextImpl jobCtx = taskCtx.jobContext();
+
+        try {
+            InputSplit nativeSplit = hadoopContext().getInputSplit();
+
+            if (nativeSplit == null)
+                throw new IgniteCheckedException("Input split cannot be null.");
+
+            InputFormat inFormat = ReflectionUtils.newInstance(jobCtx.getInputFormatClass(),
+                hadoopContext().getConfiguration());
+
+            RecordReader reader = inFormat.createRecordReader(nativeSplit, hadoopContext());
+
+            reader.initialize(nativeSplit, hadoopContext());
+
+            hadoopContext().reader(reader);
+
+            HadoopJobInfo jobInfo = taskCtx.job().info();
+
+            outputFormat = jobInfo.hasCombiner() || jobInfo.hasReducer() ? null : prepareWriter(jobCtx);
+
+            Mapper mapper = ReflectionUtils.newInstance(jobCtx.getMapperClass(), hadoopContext().getConfiguration());
+
+            try {
+                mapper.run(new WrappedMapper().getMapContext(hadoopContext()));
+            }
+            finally {
+                closeWriter();
+            }
+
+            commit(outputFormat);
+        }
+        catch (InterruptedException e) {
+            err = e;
+
+            Thread.currentThread().interrupt();
+
+            throw new IgniteInterruptedCheckedException(e);
+        }
+        catch (Exception e) {
+            err = e;
+
+            throw new IgniteCheckedException(e);
+        }
+        finally {
+            if (err != null)
+                abort(outputFormat);
+        }
+    }
+}
\ No newline at end of file
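
The catch blocks above follow the standard interrupt-handling idiom: restore the thread's
interrupt flag before wrapping and rethrowing, so callers up the stack can still observe
the interrupt. In isolation, with a plain exception standing in for
IgniteInterruptedCheckedException:

    /** Restore-then-wrap interrupt idiom used by the task classes (sketch). */
    public class InterruptIdiomDemo {
        static void runTask() throws Exception {
            try {
                Thread.sleep(10); // stands in for mapper.run(...)
            }
            catch (InterruptedException e) {
                // Restore the flag so code further up the stack sees the interrupt...
                Thread.currentThread().interrupt();

                // ...then rethrow as the framework's checked exception type.
                throw new Exception("Task interrupted.", e);
            }
        }

        public static void main(String[] args) throws Exception {
            runTask();
        }
    }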

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Partitioner.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Partitioner.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Partitioner.java
new file mode 100644
index 0000000..e199ede
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Partitioner.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v2;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.Partitioner;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.ignite.internal.processors.hadoop.HadoopPartitioner;
+
+/**
+ * Hadoop partitioner adapter for v2 API.
+ */
+public class HadoopV2Partitioner implements HadoopPartitioner {
+    /** Partitioner instance. */
+    private Partitioner<Object, Object> part;
+
+    /**
+     * @param cls Hadoop partitioner class.
+     * @param conf Job configuration.
+     */
+    public HadoopV2Partitioner(Class<? extends Partitioner<?, ?>> cls, Configuration conf) {
+        part = (Partitioner<Object, Object>) ReflectionUtils.newInstance(cls, conf);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int partition(Object key, Object val, int parts) {
+        return part.getPartition(key, val, parts);
+    }
+}
\ No newline at end of file
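
For context, the partitioner this adapter most commonly wraps is Hadoop's default
HashPartitioner, whose whole contract fits in one expression. A stand-alone sketch
mirroring that default:

    /** Stand-in mirroring Hadoop's default hash partitioning. */
    public class HashPartitionDemo {
        static int partition(Object key, int parts) {
            // Mask the sign bit so negative hash codes still map into [0, parts).
            return (key.hashCode() & Integer.MAX_VALUE) % parts;
        }

        public static void main(String[] args) {
            System.out.println(partition("hello", 8)); // deterministic value in [0, 8)
        }
    }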

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2ReduceTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2ReduceTask.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2ReduceTask.java
new file mode 100644
index 0000000..e5c2ed2
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2ReduceTask.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v2;
+
+import org.apache.hadoop.mapred.JobContextImpl;
+import org.apache.hadoop.mapreduce.OutputFormat;
+import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.IgniteInterruptedCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+
+/**
+ * Hadoop reduce task implementation for v2 API.
+ */
+public class HadoopV2ReduceTask extends HadoopV2Task {
+    /** {@code True} if reduce, {@code false} if combine. */
+    private final boolean reduce;
+
+    /**
+     * Constructor.
+     *
+     * @param taskInfo Task info.
+     * @param reduce {@code True} if reduce, {@code false} if combine.
+     */
+    public HadoopV2ReduceTask(HadoopTaskInfo taskInfo, boolean reduce) {
+        super(taskInfo);
+
+        this.reduce = reduce;
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings({"ConstantConditions", "unchecked"})
+    @Override public void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException {
+        OutputFormat outputFormat = null;
+        Exception err = null;
+
+        JobContextImpl jobCtx = taskCtx.jobContext();
+
+        try {
+            outputFormat = reduce || !taskCtx.job().info().hasReducer() ? prepareWriter(jobCtx) : null;
+
+            Reducer reducer = reduce ?
+                ReflectionUtils.newInstance(jobCtx.getReducerClass(), jobCtx.getConfiguration()) :
+                ReflectionUtils.newInstance(jobCtx.getCombinerClass(), jobCtx.getConfiguration());
+
+            try {
+                reducer.run(new WrappedReducer().getReducerContext(hadoopContext()));
+            }
+            finally {
+                closeWriter();
+            }
+
+            commit(outputFormat);
+        }
+        catch (InterruptedException e) {
+            err = e;
+
+            Thread.currentThread().interrupt();
+
+            throw new IgniteInterruptedCheckedException(e);
+        }
+        catch (Exception e) {
+            err = e;
+
+            throw new IgniteCheckedException(e);
+        }
+        finally {
+            if (err != null)
+                abort(outputFormat);
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2SetupTask.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2SetupTask.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2SetupTask.java
new file mode 100644
index 0000000..49b5ee7
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2SetupTask.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v2;
+
+import java.io.IOException;
+import org.apache.hadoop.mapred.JobContextImpl;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.OutputFormat;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.IgniteInterruptedCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+
+/**
+ * Hadoop setup task (prepares job).
+ */
+public class HadoopV2SetupTask extends HadoopV2Task {
+    /**
+     * Constructor.
+     *
+     * @param taskInfo Task info.
+     */
+    public HadoopV2SetupTask(HadoopTaskInfo taskInfo) {
+        super(taskInfo);
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("ConstantConditions")
+    @Override protected void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException {
+        try {
+            JobContextImpl jobCtx = taskCtx.jobContext();
+
+            OutputFormat outputFormat = getOutputFormat(jobCtx);
+
+            outputFormat.checkOutputSpecs(jobCtx);
+
+            OutputCommitter committer = outputFormat.getOutputCommitter(hadoopContext());
+
+            if (committer != null)
+                committer.setupJob(jobCtx);
+        }
+        catch (ClassNotFoundException | IOException e) {
+            throw new IgniteCheckedException(e);
+        }
+        catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+
+            throw new IgniteInterruptedCheckedException(e);
+        }
+    }
+}
\ No newline at end of file
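
The setup task above drives only the first step of the OutputCommitter lifecycle
(setupJob); the map and reduce tasks drive setupTask, commitTask and abortTask through
prepareWriter(), commit() and abort(). A stub committer makes the call order easy to
trace (a sketch against the stock Hadoop API, not part of this patch):

    import org.apache.hadoop.mapreduce.JobContext;
    import org.apache.hadoop.mapreduce.OutputCommitter;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;

    /** Traces the lifecycle calls the tasks above issue. */
    public class TraceCommitter extends OutputCommitter {
        @Override public void setupJob(JobContext ctx) { System.out.println("setupJob"); }
        @Override public void setupTask(TaskAttemptContext ctx) { System.out.println("setupTask"); }
        @Override public boolean needsTaskCommit(TaskAttemptContext ctx) { return true; }
        @Override public void commitTask(TaskAttemptContext ctx) { System.out.println("commitTask"); }
        @Override public void abortTask(TaskAttemptContext ctx) { System.out.println("abortTask"); }
    }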

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Splitter.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Splitter.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Splitter.java
new file mode 100644
index 0000000..f4ed668
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Splitter.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v2;
+
+import java.io.DataInput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import org.apache.hadoop.mapreduce.InputFormat;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.lib.input.FileSplit;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.IgniteInterruptedCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopFileBlock;
+import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
+import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Hadoop API v2 splitter.
+ */
+public class HadoopV2Splitter {
+    /** */
+    private static final String[] EMPTY_HOSTS = {};
+
+    /**
+     * @param ctx Job context.
+     * @return Collection of mapped splits.
+     * @throws IgniteCheckedException If mapping failed.
+     */
+    public static Collection<HadoopInputSplit> splitJob(JobContext ctx) throws IgniteCheckedException {
+        try {
+            InputFormat<?, ?> format = ReflectionUtils.newInstance(ctx.getInputFormatClass(), ctx.getConfiguration());
+
+            assert format != null;
+
+            List<InputSplit> splits = format.getSplits(ctx);
+
+            Collection<HadoopInputSplit> res = new ArrayList<>(splits.size());
+
+            int id = 0;
+
+            for (InputSplit nativeSplit : splits) {
+                if (nativeSplit instanceof FileSplit) {
+                    FileSplit s = (FileSplit)nativeSplit;
+
+                    res.add(new HadoopFileBlock(s.getLocations(), s.getPath().toUri(), s.getStart(), s.getLength()));
+                }
+                else
+                    res.add(HadoopUtils.wrapSplit(id, nativeSplit, nativeSplit.getLocations()));
+
+                id++;
+            }
+
+            return res;
+        }
+        catch (IOException | ClassNotFoundException e) {
+            throw new IgniteCheckedException(e);
+        }
+        catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+
+            throw new IgniteInterruptedCheckedException(e);
+        }
+    }
+
+    /**
+     * @param clsName Input split class name.
+     * @param in Input stream.
+     * @param hosts Optional hosts.
+     * @return File block or {@code null} if it is not a {@link FileSplit} instance.
+     * @throws IgniteCheckedException If failed.
+     */
+    public static HadoopFileBlock readFileBlock(String clsName, DataInput in, @Nullable String[] hosts)
+        throws IgniteCheckedException {
+        if (!FileSplit.class.getName().equals(clsName))
+            return null;
+
+        FileSplit split = new FileSplit();
+
+        try {
+            split.readFields(in);
+        }
+        catch (IOException e) {
+            throw new IgniteCheckedException(e);
+        }
+
+        if (hosts == null)
+            hosts = EMPTY_HOSTS;
+
+        return new HadoopFileBlock(hosts, split.getPath().toUri(), split.getStart(), split.getLength());
+    }
+}
\ No newline at end of file
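
One detail worth noting: FileSplit's Writable form carries only the path, start and
length, not the host locations, which is why readFileBlock() above takes hosts as a
separate argument (in HadoopV2Job they come from the split meta info). A quick
round-trip sketch:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.lib.input.FileSplit;

    public class SplitRoundTripDemo {
        public static void main(String[] args) throws Exception {
            FileSplit split = new FileSplit(new Path("/data/part-0"), 0L, 1024L, new String[] {"host1"});

            ByteArrayOutputStream buf = new ByteArrayOutputStream();
            split.write(new DataOutputStream(buf)); // serializes path, start and length only

            FileSplit copy = new FileSplit();
            copy.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));

            System.out.println(copy.getPath() + " " + copy.getStart() + "+" + copy.getLength());
            System.out.println(copy.getLocations().length); // 0: hosts were not serialized
        }
    }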

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Task.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Task.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Task.java
new file mode 100644
index 0000000..1383a61
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Task.java
@@ -0,0 +1,185 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v2;
+
+import java.io.IOException;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.OutputFormat;
+import org.apache.hadoop.mapreduce.RecordWriter;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopTask;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Extended Hadoop v2 task.
+ */
+public abstract class HadoopV2Task extends HadoopTask {
+    /** Hadoop context. */
+    private HadoopV2Context hadoopCtx;
+
+    /**
+     * Constructor.
+     *
+     * @param taskInfo Task info.
+     */
+    protected HadoopV2Task(HadoopTaskInfo taskInfo) {
+        super(taskInfo);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
+        HadoopV2TaskContext ctx = (HadoopV2TaskContext)taskCtx;
+
+        hadoopCtx = new HadoopV2Context(ctx);
+
+        run0(ctx);
+    }
+
+    /**
+     * Internal task routine.
+     *
+     * @param taskCtx Task context.
+     * @throws IgniteCheckedException If failed.
+     */
+    protected abstract void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException;
+
+    /**
+     * @return Hadoop context.
+     */
+    protected HadoopV2Context hadoopContext() {
+        return hadoopCtx;
+    }
+
+    /**
+     * Create and configure an OutputFormat instance.
+     *
+     * @param jobCtx Job context.
+     * @return Instance of the OutputFormat specified in the job configuration.
+     * @throws ClassNotFoundException If the specified class is not found.
+     */
+    protected OutputFormat getOutputFormat(JobContext jobCtx) throws ClassNotFoundException {
+        return ReflectionUtils.newInstance(jobCtx.getOutputFormatClass(), hadoopContext().getConfiguration());
+    }
+
+    /**
+     * Puts the record writer into the Hadoop context and returns the associated output format instance.
+     *
+     * @param jobCtx Job context.
+     * @return Output format.
+     * @throws IgniteCheckedException In case of Grid exception.
+     * @throws InterruptedException In case of interrupt.
+     */
+    protected OutputFormat prepareWriter(JobContext jobCtx)
+        throws IgniteCheckedException, InterruptedException {
+        try {
+            OutputFormat outputFormat = getOutputFormat(jobCtx);
+
+            assert outputFormat != null;
+
+            OutputCommitter outCommitter = outputFormat.getOutputCommitter(hadoopCtx);
+
+            if (outCommitter != null)
+                outCommitter.setupTask(hadoopCtx);
+
+            RecordWriter writer = outputFormat.getRecordWriter(hadoopCtx);
+
+            hadoopCtx.writer(writer);
+
+            return outputFormat;
+        }
+        catch (IOException | ClassNotFoundException e) {
+            throw new IgniteCheckedException(e);
+        }
+    }
+
+    /**
+     * Closes writer.
+     *
+     * @throws Exception If the writer fails to close.
+     */
+    protected void closeWriter() throws Exception {
+        RecordWriter writer = hadoopCtx.writer();
+
+        if (writer != null)
+            writer.close(hadoopCtx);
+    }
+
+    /**
+     * Setup task.
+     *
+     * @param outputFormat Output format.
+     * @throws IOException In case of IO exception.
+     * @throws InterruptedException In case of interrupt.
+     */
+    protected void setup(@Nullable OutputFormat outputFormat) throws IOException, InterruptedException {
+        if (hadoopCtx.writer() != null) {
+            assert outputFormat != null;
+
+            outputFormat.getOutputCommitter(hadoopCtx).setupTask(hadoopCtx);
+        }
+    }
+
+    /**
+     * Commit task.
+     *
+     * @param outputFormat Output format.
+     * @throws IgniteCheckedException In case of Grid exception.
+     * @throws IOException In case of IO exception.
+     * @throws InterruptedException In case of interrupt.
+     */
+    protected void commit(@Nullable OutputFormat outputFormat) throws IgniteCheckedException, IOException, InterruptedException {
+        if (hadoopCtx.writer() != null) {
+            assert outputFormat != null;
+
+            OutputCommitter outputCommitter = outputFormat.getOutputCommitter(hadoopCtx);
+
+            if (outputCommitter.needsTaskCommit(hadoopCtx))
+                outputCommitter.commitTask(hadoopCtx);
+        }
+    }
+
+    /**
+     * Abort task.
+     *
+     * @param outputFormat Output format.
+     */
+    protected void abort(@Nullable OutputFormat outputFormat) {
+        if (hadoopCtx.writer() != null) {
+            assert outputFormat != null;
+
+            try {
+                outputFormat.getOutputCommitter(hadoopCtx).abortTask(hadoopCtx);
+            }
+            catch (IOException ignore) {
+                // Ignore.
+            }
+            catch (InterruptedException ignore) {
+                Thread.currentThread().interrupt();
+            }
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void cancel() {
+        hadoopCtx.cancel();
+    }
+}
\ No newline at end of file
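
For orientation, a concrete run0() implementation would drive the hooks above in the
usual Hadoop output-commit order; roughly (a sketch, not code from the actual
map/reduce subclasses):

    protected void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException {
        OutputFormat outputFormat = null;

        try {
            // Creates the record writer and calls OutputCommitter.setupTask().
            outputFormat = prepareWriter(taskCtx.jobContext());

            // ... emit records through hadoopContext().writer() ...

            closeWriter();         // Flush and close the writer.
            commit(outputFormat);  // commitTask() if the committer asks for it.
        }
        catch (Exception e) {
            abort(outputFormat);   // Best-effort abortTask(); the writer may be absent.

            throw new IgniteCheckedException(e);
        }
    }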

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2TaskContext.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2TaskContext.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2TaskContext.java
new file mode 100644
index 0000000..4b1121c
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2TaskContext.java
@@ -0,0 +1,563 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v2;
+
+import java.io.DataInput;
+import java.io.File;
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+import java.util.Comparator;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.serializer.Deserializer;
+import org.apache.hadoop.io.serializer.Serialization;
+import org.apache.hadoop.io.serializer.SerializationFactory;
+import org.apache.hadoop.io.serializer.WritableSerialization;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobContextImpl;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapred.TaskAttemptID;
+import org.apache.hadoop.mapred.TaskID;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.JobSubmissionFiles;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopClassLoader;
+import org.apache.ignite.internal.processors.hadoop.HadoopInputSplit;
+import org.apache.ignite.internal.processors.hadoop.HadoopJob;
+import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
+import org.apache.ignite.internal.processors.hadoop.HadoopPartitioner;
+import org.apache.ignite.internal.processors.hadoop.HadoopSerialization;
+import org.apache.ignite.internal.processors.hadoop.HadoopTask;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskContext;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo;
+import org.apache.ignite.internal.processors.hadoop.HadoopTaskType;
+import org.apache.ignite.internal.processors.hadoop.HadoopUtils;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounter;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters;
+import org.apache.ignite.internal.processors.hadoop.counter.HadoopCountersImpl;
+import org.apache.ignite.internal.processors.hadoop.fs.HadoopLazyConcurrentMap;
+import org.apache.ignite.internal.processors.hadoop.v1.HadoopV1CleanupTask;
+import org.apache.ignite.internal.processors.hadoop.v1.HadoopV1MapTask;
+import org.apache.ignite.internal.processors.hadoop.v1.HadoopV1Partitioner;
+import org.apache.ignite.internal.processors.hadoop.v1.HadoopV1ReduceTask;
+import org.apache.ignite.internal.processors.hadoop.v1.HadoopV1SetupTask;
+import org.apache.ignite.internal.processors.igfs.IgfsUtils;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.internal.A;
+import org.jetbrains.annotations.Nullable;
+
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.jobLocalDir;
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.taskLocalDir;
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.transformException;
+import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.unwrapSplit;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemCacheUtils.FsCacheKey;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemCacheUtils.createHadoopLazyConcurrentMap;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemCacheUtils.fileSystemForMrUserWithCaching;
+import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.PARAM_IGFS_PREFER_LOCAL_WRITES;
+
+/**
+ * Context for task execution.
+ */
+public class HadoopV2TaskContext extends HadoopTaskContext {
+    /** */
+    private static final boolean COMBINE_KEY_GROUPING_SUPPORTED;
+
+    /** Lazy per-user file system cache used by the Hadoop task. */
+    private static final HadoopLazyConcurrentMap<FsCacheKey, FileSystem> fsMap
+        = createHadoopLazyConcurrentMap();
+
+    /**
+     * This method is called via reflection upon job finish with the class loader of each task.
+     * It cleans up all the file systems created for the specific task.
+     * Each class loader uses its own instance of <code>fsMap</code> since the class loaders
+     * are different.
+     *
+     * @throws IgniteCheckedException On error.
+     */
+    public static void close() throws IgniteCheckedException {
+        fsMap.close();
+    }
+
+    /**
+     * Check for combiner grouping support (available since Hadoop 2.3).
+     */
+    static {
+        boolean ok;
+
+        try {
+            JobContext.class.getDeclaredMethod("getCombinerKeyGroupingComparator");
+
+            ok = true;
+        }
+        catch (NoSuchMethodException ignore) {
+            ok = false;
+        }
+
+        COMBINE_KEY_GROUPING_SUPPORTED = ok;
+    }
+
+    /** Flag is set if new context-object code is used for running the mapper. */
+    private final boolean useNewMapper;
+
+    /** Flag is set if new context-object code is used for running the reducer. */
+    private final boolean useNewReducer;
+
+    /** Flag is set if new context-object code is used for running the combiner. */
+    private final boolean useNewCombiner;
+
+    /** */
+    private final JobContextImpl jobCtx;
+
+    /** Set if the task is being cancelled. */
+    private volatile boolean cancelled;
+
+    /** Current task. */
+    private volatile HadoopTask task;
+
+    /** Local node ID. */
+    private final UUID locNodeId;
+
+    /** Counters for task. */
+    private final HadoopCounters cntrs = new HadoopCountersImpl();
+
+    /**
+     * @param taskInfo Task info.
+     * @param job Job.
+     * @param jobId Job ID.
+     * @param locNodeId Local node ID.
+     * @param jobConfDataInput DataInput to read the JobConf from.
+     * @throws IgniteCheckedException If failed.
+     */
+    public HadoopV2TaskContext(HadoopTaskInfo taskInfo, HadoopJob job, HadoopJobId jobId,
+        @Nullable UUID locNodeId, DataInput jobConfDataInput) throws IgniteCheckedException {
+        super(taskInfo, job);
+        this.locNodeId = locNodeId;
+
+        // Before creating the JobConf instance we should set the new context class loader.
+        ClassLoader oldLdr = HadoopUtils.setContextClassLoader(getClass().getClassLoader());
+
+        try {
+            JobConf jobConf = new JobConf();
+
+            try {
+                jobConf.readFields(jobConfDataInput);
+            }
+            catch (IOException e) {
+                throw new IgniteCheckedException(e);
+            }
+
+            // For map-reduce jobs prefer local writes.
+            jobConf.setBooleanIfUnset(PARAM_IGFS_PREFER_LOCAL_WRITES, true);
+
+            jobCtx = new JobContextImpl(jobConf, new JobID(jobId.globalId().toString(), jobId.localId()));
+
+            useNewMapper = jobConf.getUseNewMapper();
+            useNewReducer = jobConf.getUseNewReducer();
+            useNewCombiner = jobConf.getCombinerClass() == null;
+        }
+        finally {
+            HadoopUtils.restoreContextClassLoader(oldLdr);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public <T extends HadoopCounter> T counter(String grp, String name, Class<T> cls) {
+        return cntrs.counter(grp, name, cls);
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopCounters counters() {
+        return cntrs;
+    }
+
+    /**
+     * Creates appropriate task from current task info.
+     *
+     * @return Task.
+     */
+    private HadoopTask createTask() {
+        boolean isAbort = taskInfo().type() == HadoopTaskType.ABORT;
+
+        switch (taskInfo().type()) {
+            case SETUP:
+                return useNewMapper ? new HadoopV2SetupTask(taskInfo()) : new HadoopV1SetupTask(taskInfo());
+
+            case MAP:
+                return useNewMapper ? new HadoopV2MapTask(taskInfo()) : new HadoopV1MapTask(taskInfo());
+
+            case REDUCE:
+                return useNewReducer ? new HadoopV2ReduceTask(taskInfo(), true) :
+                    new HadoopV1ReduceTask(taskInfo(), true);
+
+            case COMBINE:
+                return useNewCombiner ? new HadoopV2ReduceTask(taskInfo(), false) :
+                    new HadoopV1ReduceTask(taskInfo(), false);
+
+            case COMMIT:
+            case ABORT:
+                return useNewReducer ? new HadoopV2CleanupTask(taskInfo(), isAbort) :
+                    new HadoopV1CleanupTask(taskInfo(), isAbort);
+
+            default:
+                return null;
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void run() throws IgniteCheckedException {
+        ClassLoader oldLdr = HadoopUtils.setContextClassLoader(jobConf().getClassLoader());
+
+        try {
+            try {
+                task = createTask();
+            }
+            catch (Throwable e) {
+                if (e instanceof Error)
+                    throw e;
+
+                throw transformException(e);
+            }
+
+            if (cancelled)
+                throw new HadoopTaskCancelledException("Task cancelled.");
+
+            try {
+                task.run(this);
+            }
+            catch (Throwable e) {
+                if (e instanceof Error)
+                    throw e;
+
+                throw transformException(e);
+            }
+        }
+        finally {
+            task = null;
+
+            HadoopUtils.restoreContextClassLoader(oldLdr);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void cancel() {
+        cancelled = true;
+
+        HadoopTask t = task;
+
+        if (t != null)
+            t.cancel();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void prepareTaskEnvironment() throws IgniteCheckedException {
+        File locDir;
+
+        switch(taskInfo().type()) {
+            case MAP:
+            case REDUCE:
+                job().prepareTaskEnvironment(taskInfo());
+
+                locDir = taskLocalDir(locNodeId, taskInfo());
+
+                break;
+
+            default:
+                locDir = jobLocalDir(locNodeId, taskInfo().jobId());
+        }
+
+        ClassLoader oldLdr = HadoopUtils.setContextClassLoader(jobConf().getClassLoader());
+
+        try {
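+            // Get (and thereby cache) the default file system for this configuration.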
+            FileSystem.get(jobConf());
+
+            LocalFileSystem locFs = FileSystem.getLocal(jobConf());
+
+            locFs.setWorkingDirectory(new Path(locDir.getAbsolutePath()));
+        }
+        catch (Throwable e) {
+            if (e instanceof Error)
+                throw (Error)e;
+
+            throw transformException(e);
+        }
+        finally {
+            HadoopUtils.restoreContextClassLoader(oldLdr);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void cleanupTaskEnvironment() throws IgniteCheckedException {
+        job().cleanupTaskEnvironment(taskInfo());
+    }
+
+    /**
+     * Creates Hadoop attempt ID.
+     *
+     * @return Attempt ID.
+     */
+    public TaskAttemptID attemptId() {
+        TaskID tid = new TaskID(jobCtx.getJobID(), taskType(taskInfo().type()), taskInfo().taskNumber());
+
+        return new TaskAttemptID(tid, taskInfo().attempt());
+    }
+
+    /**
+     * @param type Task type.
+     * @return Hadoop task type.
+     */
+    private TaskType taskType(HadoopTaskType type) {
+        switch (type) {
+            case SETUP:
+                return TaskType.JOB_SETUP;
+
+            case MAP:
+            case COMBINE:
+                return TaskType.MAP;
+
+            case REDUCE:
+                return TaskType.REDUCE;
+
+            case COMMIT:
+            case ABORT:
+                return TaskType.JOB_CLEANUP;
+
+            default:
+                return null;
+        }
+    }
+
+    /**
+     * Gets job configuration of the task.
+     *
+     * @return Job configuration.
+     */
+    public JobConf jobConf() {
+        return jobCtx.getJobConf();
+    }
+
+    /**
+     * Gets job context of the task.
+     *
+     * @return Job context.
+     */
+    public JobContextImpl jobContext() {
+        return jobCtx;
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopPartitioner partitioner() throws IgniteCheckedException {
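+        // An explicitly configured old-style (mapred API) partitioner takes precedence.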
+        Class<?> partClsOld = jobConf().getClass("mapred.partitioner.class", null);
+
+        if (partClsOld != null)
+            return new HadoopV1Partitioner(jobConf().getPartitionerClass(), jobConf());
+
+        try {
+            return new HadoopV2Partitioner(jobCtx.getPartitionerClass(), jobConf());
+        }
+        catch (ClassNotFoundException e) {
+            throw new IgniteCheckedException(e);
+        }
+    }
+
+    /**
+     * Gets serializer for specified class.
+     *
+     * @param cls Class.
+     * @param jobConf Job configuration.
+     * @return Appropriate serializer.
+     */
+    @SuppressWarnings("unchecked")
+    private HadoopSerialization getSerialization(Class<?> cls, Configuration jobConf) throws IgniteCheckedException {
+        A.notNull(cls, "cls");
+
+        SerializationFactory factory = new SerializationFactory(jobConf);
+
+        Serialization<?> serialization = factory.getSerialization(cls);
+
+        if (serialization == null)
+            throw new IgniteCheckedException("Failed to find serialization for: " + cls.getName());
+
+        if (serialization.getClass() == WritableSerialization.class)
+            return new HadoopWritableSerialization((Class<? extends Writable>)cls);
+
+        return new HadoopSerializationWrapper(serialization, cls);
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopSerialization keySerialization() throws IgniteCheckedException {
+        return getSerialization(jobCtx.getMapOutputKeyClass(), jobConf());
+    }
+
+    /** {@inheritDoc} */
+    @Override public HadoopSerialization valueSerialization() throws IgniteCheckedException {
+        return getSerialization(jobCtx.getMapOutputValueClass(), jobConf());
+    }
+
+    /** {@inheritDoc} */
+    @Override public Comparator<Object> sortComparator() {
+        return (Comparator<Object>)jobCtx.getSortComparator();
+    }
+
+    /** {@inheritDoc} */
+    @Override public Comparator<Object> groupComparator() {
+        Comparator<?> res;
+
+        switch (taskInfo().type()) {
+            case COMBINE:
+                res = COMBINE_KEY_GROUPING_SUPPORTED ?
+                    jobContext().getCombinerKeyGroupingComparator() : jobContext().getGroupingComparator();
+
+                break;
+
+            case REDUCE:
+                res = jobContext().getGroupingComparator();
+
+                break;
+
+            default:
+                return null;
+        }
+
+        if (res != null && res.getClass() != sortComparator().getClass())
+            return (Comparator<Object>)res;
+
+        return null;
+    }
+
+    /**
+     * @param split Split.
+     * @return Native Hadoop split.
+     * @throws IgniteCheckedException If failed.
+     */
+    @SuppressWarnings("unchecked")
+    public Object getNativeSplit(HadoopInputSplit split) throws IgniteCheckedException {
+        if (split instanceof HadoopExternalSplit)
+            return readExternalSplit((HadoopExternalSplit)split);
+
+        if (split instanceof HadoopSplitWrapper)
+            return unwrapSplit((HadoopSplitWrapper)split);
+
+        throw new IllegalStateException("Unknown split: " + split);
+    }
+
+    /**
+     * @param split External split.
+     * @return Native input split.
+     * @throws IgniteCheckedException If failed.
+     */
+    @SuppressWarnings("unchecked")
+    private Object readExternalSplit(HadoopExternalSplit split) throws IgniteCheckedException {
+        Path jobDir = new Path(jobConf().get(MRJobConfig.MAPREDUCE_JOB_DIR));
+
+        FileSystem fs;
+
+        try {
+            // This assertion uses .startsWith() instead of .equals() because task class loaders may
+            // be reused between tasks of the same job.
+            assert ((HadoopClassLoader)getClass().getClassLoader()).name()
+                .startsWith(HadoopClassLoader.nameForTask(taskInfo(), true));
+
+            // We also cache file systems there; all of them will be cleared explicitly upon job end.
+            fs = fileSystemForMrUserWithCaching(jobDir.toUri(), jobConf(), fsMap);
+        }
+        catch (IOException e) {
+            throw new IgniteCheckedException(e);
+        }
+
+        try (FSDataInputStream in = fs.open(JobSubmissionFiles.getJobSplitFile(jobDir))) {
+
+            in.seek(split.offset());
+
+            String clsName = Text.readString(in);
+
+            Class<?> cls = jobConf().getClassByName(clsName);
+
+            assert cls != null;
+
+            Serialization serialization = new SerializationFactory(jobConf()).getSerialization(cls);
+
+            Deserializer deserializer = serialization.getDeserializer(cls);
+
+            deserializer.open(in);
+
+            Object res = deserializer.deserialize(null);
+
+            deserializer.close();
+
+            assert res != null;
+
+            return res;
+        }
+        catch (IOException | ClassNotFoundException e) {
+            throw new IgniteCheckedException(e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public <T> T runAsJobOwner(final Callable<T> c) throws IgniteCheckedException {
+        String user = job.info().user();
+
+        user = IgfsUtils.fixUserName(user);
+
+        assert user != null;
+
+        String ugiUser;
+
+        try {
+            UserGroupInformation currUser = UserGroupInformation.getCurrentUser();
+
+            assert currUser != null;
+
+            ugiUser = currUser.getShortUserName();
+        }
+        catch (IOException ioe) {
+            throw new IgniteCheckedException(ioe);
+        }
+
+        try {
+            if (F.eq(user, ugiUser))
+                // If the current UGI context user is the same, do a direct call.
+                return c.call();
+            else {
+                UserGroupInformation ugi = UserGroupInformation.getBestUGI(null, user);
+
+                return ugi.doAs(new PrivilegedExceptionAction<T>() {
+                    @Override public T run() throws Exception {
+                        return c.call();
+                    }
+                });
+            }
+        }
+        catch (Exception e) {
+            throw new IgniteCheckedException(e);
+        }
+    }
+}
\ No newline at end of file
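
The runAsJobOwner() logic above is the standard Hadoop proxy-user pattern. In
isolation (with a made-up user name) it reduces to:

    // Run an action under another user's UGI context.
    UserGroupInformation ugi = UserGroupInformation.getBestUGI(null, "jobOwner");

    String name = ugi.doAs(new PrivilegedExceptionAction<String>() {
        @Override public String run() throws Exception {
            // File system access here happens on behalf of "jobOwner".
            return UserGroupInformation.getCurrentUser().getShortUserName();
        }
    });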

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopWritableSerialization.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopWritableSerialization.java b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopWritableSerialization.java
new file mode 100644
index 0000000..f46f068
--- /dev/null
+++ b/modules/hadoop-impl/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopWritableSerialization.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.hadoop.v2;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import org.apache.hadoop.io.Writable;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.hadoop.HadoopSerialization;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Optimized serialization for Hadoop {@link Writable} types.
+ */
+public class HadoopWritableSerialization implements HadoopSerialization {
+    /** */
+    private final Class<? extends Writable> cls;
+
+    /**
+     * @param cls Class.
+     */
+    public HadoopWritableSerialization(Class<? extends Writable> cls) {
+        assert cls != null;
+
+        this.cls = cls;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void write(DataOutput out, Object obj) throws IgniteCheckedException {
+        assert cls.isAssignableFrom(obj.getClass()) : cls + " " + obj.getClass();
+
+        try {
+            ((Writable)obj).write(out);
+        }
+        catch (IOException e) {
+            throw new IgniteCheckedException(e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public Object read(DataInput in, @Nullable Object obj) throws IgniteCheckedException {
+        Writable w = obj == null ? U.newInstance(cls) : cls.cast(obj);
+
+        try {
+            w.readFields(in);
+        }
+        catch (IOException e) {
+            throw new IgniteCheckedException(e);
+        }
+
+        return w;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void close() {
+        // No-op.
+    }
+}
\ No newline at end of file
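
A minimal round-trip through this serialization (IntWritable chosen arbitrarily;
imports from java.io and org.apache.hadoop.io assumed):

    HadoopSerialization ser = new HadoopWritableSerialization(IntWritable.class);

    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    ser.write(new DataOutputStream(buf), new IntWritable(42));

    // Passing null as the second argument forces allocation of a new instance.
    IntWritable res = (IntWritable)ser.read(
        new DataInputStream(new ByteArrayInputStream(buf.toByteArray())), null);

    assert res.get() == 42;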

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider b/modules/hadoop-impl/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider
new file mode 100644
index 0000000..8d5957b
--- /dev/null
+++ b/modules/hadoop-impl/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider
@@ -0,0 +1 @@
+org.apache.ignite.hadoop.mapreduce.IgniteHadoopClientProtocolProvider
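
This single line is a standard java.util.ServiceLoader registration: Hadoop's job
client scans all ClientProtocolProvider implementations on the classpath and picks
the one matching the configured framework. Discovery is roughly equivalent to:

    // Roughly how Hadoop locates the Ignite provider at job submission time.
    for (ClientProtocolProvider p : ServiceLoader.load(ClientProtocolProvider.class))
        System.out.println(p.getClass().getName());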

http://git-wip-us.apache.org/repos/asf/ignite/blob/857cdcde/modules/hadoop-impl/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolEmbeddedSelfTest.java
----------------------------------------------------------------------
diff --git a/modules/hadoop-impl/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolEmbeddedSelfTest.java b/modules/hadoop-impl/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolEmbeddedSelfTest.java
new file mode 100644
index 0000000..5a20a75
--- /dev/null
+++ b/modules/hadoop-impl/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolEmbeddedSelfTest.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.client.hadoop;
+
+import org.apache.ignite.configuration.HadoopConfiguration;
+
+/**
+ * Hadoop client protocol tests in embedded process mode.
+ */
+public class HadoopClientProtocolEmbeddedSelfTest extends HadoopClientProtocolSelfTest {
+    /** {@inheritDoc} */
+    @Override public HadoopConfiguration hadoopConfiguration(String gridName) {
+        HadoopConfiguration cfg = super.hadoopConfiguration(gridName);
+
+        // TODO: IGNITE-404: Uncomment when fixed.
+        //cfg.setExternalExecution(false);
+
+        return cfg;
+    }
+}
\ No newline at end of file